From commits-noreply at bitbucket.org  Fri Oct  2 17:01:17 2009
From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org)
Date: Fri, 2 Oct 2009 15:01:17 +0000 (UTC)
Subject: [py-svn] py-trunk commit 2036f394193f: adding internal repr for debugging
Message-ID: <20091002150117.12225710AD@bitbucket.org>

# HG changeset patch -- Bitbucket.org
# Project py-trunk
# URL http://bitbucket.org/hpk42/py-trunk/overview/
# User holger krekel
# Date 1254328564 -7200
# Node ID 2036f394193fe3d3b37cf5b80ea2128f5ae9764e
# Parent 48444c100e15b2acc3ffd1c515dbb7d41d928e92

adding internal repr for debugging
adding an example for generating multi-args/multi python tests

--- /dev/null
+++ b/example/funcarg/test_multi_python.py
@@ -0,0 +1,65 @@
+"""
+
+module containing a parametrized tests testing cross-python
+serialization via the pickle module.
+"""
+import py
+
+pythonlist = ['python2.3', 'python2.4', 'python2.5', 'python2.6']
+# 'jython' 'python3.1']
+
+def pytest_generate_tests(metafunc):
+    if 'python1' in metafunc.funcargnames:
+        assert 'python2' in metafunc.funcargnames
+        for obj in metafunc.function.multiarg.obj:
+            for py1 in pythonlist:
+                for py2 in pythonlist:
+                    metafunc.addcall(id="%s-%s-%s" % (py1, py2, obj),
+                        param=(py1, py2, obj))
+
+@py.test.mark.multiarg(obj=[42, {}, {1:3},])
+def test_basic_objects(python1, python2, obj):
+    python1.dumps(obj)
+    python2.load_and_is_true("obj == %s" % obj)
+
+def pytest_funcarg__python1(request):
+    tmpdir = request.getfuncargvalue("tmpdir")
+    picklefile = tmpdir.join("data.pickle")
+    return Python(request.param[0], picklefile)
+
+def pytest_funcarg__python2(request):
+    python1 = request.getfuncargvalue("python1")
+    return Python(request.param[1], python1.picklefile)
+
+def pytest_funcarg__obj(request):
+    return request.param[2]
+
+class Python:
+    def __init__(self, version, picklefile):
+        self.pythonpath = py.path.local.sysfind(version)
+        if not self.pythonpath:
+            py.test.skip("%r not found" %(version,))
+        self.picklefile = picklefile
+    def dumps(self, obj):
+        dumpfile = self.picklefile.dirpath("dump.py")
+        dumpfile.write(py.code.Source("""
+            import pickle
+            f = open(%r, 'wb')
+            s = pickle.dump(%r, f)
+            f.close()
+        """ % (str(self.picklefile), obj)))
+        py.process.cmdexec("%s %s" %(self.pythonpath, dumpfile))
+
+    def load_and_is_true(self, expression):
+        loadfile = self.picklefile.dirpath("load.py")
+        loadfile.write(py.code.Source("""
+            import pickle
+            f = open(%r, 'rb')
+            obj = pickle.load(f)
+            f.close()
+            res = eval(%r)
+            if not res:
+                raise SystemExit(1)
+        """ % (str(self.picklefile), expression)))
+        print loadfile
+        py.process.cmdexec("%s %s" %(self.pythonpath, loadfile))

--- a/py/test/funcargs.py
+++ b/py/test/funcargs.py
@@ -23,6 +23,9 @@ class CallSpec:
         self.id = id
         if param is not _notexists:
             self.param = param
+    def __repr__(self):
+        return "<CallSpec id=%r param=%s funcargs=%s>" %(
+            self.id, getattr(self, 'param', '?'), self.funcargs)
 
 class Metafunc:
     def __init__(self, function, config=None, cls=None, module=None):

--- a/testing/pytest/test_funcargs.py
+++ b/testing/pytest/test_funcargs.py
@@ -17,6 +17,12 @@ def test_getfuncargnames():
     if sys.version_info < (3,0):
         assert funcargs.getfuncargnames(A.f) == ['arg1']
 
+def test_callspec_repr():
+    cs = funcargs.CallSpec({}, 'hello', 1)
+    repr(cs)
+    cs = funcargs.CallSpec({}, 'hello', funcargs._notexists)
+    repr(cs)
+
 class TestFillFuncArgs:
     def test_funcarg_lookupfails(self, testdir):
         testdir.makeconftest("""
@@ -314,7 +320,6 @@ class TestRequestCachedSetup:
             "*3 passed*"
         ])
 
-
 class TestMetafunc:
     def test_no_funcargs(self,
testdir): def function(): pass From commits-noreply at bitbucket.org Fri Oct 2 17:01:19 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Fri, 2 Oct 2009 15:01:19 +0000 (UTC) Subject: [py-svn] py-trunk commit b571b7e9a9b2: remove py.execnet, substitute py.execnet usages with "execnet" ones. Message-ID: <20091002150119.5C868710B0@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1254495537 -7200 # Node ID b571b7e9a9b2f2f44d63a58360476de7868fd312 # Parent 2036f394193fe3d3b37cf5b80ea2128f5ae9764e remove py.execnet, substitute py.execnet usages with "execnet" ones. --- a/py/execnet/gateway.py +++ /dev/null @@ -1,354 +0,0 @@ -""" -gateway code for initiating popen, socket and ssh connections. -(c) 2004-2009, Holger Krekel and others -""" - -import sys, os, inspect, socket, atexit, weakref -import py -from py.__.execnet.gateway_base import Message, Popen2IO, SocketIO -from py.__.execnet import gateway_base - -debug = False - -class GatewayCleanup: - def __init__(self): - self._activegateways = weakref.WeakKeyDictionary() - atexit.register(self.cleanup_atexit) - - def register(self, gateway): - assert gateway not in self._activegateways - self._activegateways[gateway] = True - - def unregister(self, gateway): - del self._activegateways[gateway] - - def cleanup_atexit(self): - if debug: - debug.writeslines(["="*20, "cleaning up", "=" * 20]) - debug.flush() - for gw in list(self._activegateways): - gw.exit() - #gw.join() # should work as well - -class ExecnetAPI: - def pyexecnet_gateway_init(self, gateway): - """ signal initialisation of new gateway. """ - def pyexecnet_gateway_exit(self, gateway): - """ signal exitting of gateway. """ - -class InitiatingGateway(gateway_base.BaseGateway): - """ initialize gateways on both sides of a inputoutput object. """ - # XXX put the next two global variables into an Execnet object - # which intiaties gateways and passes in appropriate values. - _cleanup = GatewayCleanup() - hook = ExecnetAPI() - - def __init__(self, io): - self._remote_bootstrap_gateway(io) - super(InitiatingGateway, self).__init__(io=io, _startcount=1) - self._initreceive() - self.hook = py._com.HookRelay(ExecnetAPI, py._com.comregistry) - self.hook.pyexecnet_gateway_init(gateway=self) - self._cleanup.register(self) - - def __repr__(self): - """ return string representing gateway type and status. """ - if hasattr(self, 'remoteaddress'): - addr = '[%s]' % (self.remoteaddress,) - else: - addr = '' - try: - r = (self._receiverthread.isAlive() and "receiving" or - "not receiving") - s = "sending" # XXX - i = len(self._channelfactory.channels()) - except AttributeError: - r = s = "uninitialized" - i = "no" - return "<%s%s %s/%s (%s active channels)>" %( - self.__class__.__name__, addr, r, s, i) - - def exit(self): - """ Try to stop all exec and IO activity. """ - try: - self._cleanup.unregister(self) - except KeyError: - return # we assume it's already happened - self._stopexec() - self._stopsend() - self.hook.pyexecnet_gateway_exit(gateway=self) - - def _remote_bootstrap_gateway(self, io, extra=''): - """ return Gateway with a asynchronously remotely - initialized counterpart Gateway (which may or may not succeed). - Note that the other sides gateways starts enumerating - its channels with even numbers while the sender - gateway starts with odd numbers. This allows to - uniquely identify channels across both sides. 
- """ - bootstrap = [extra] - bootstrap += [inspect.getsource(gateway_base)] - bootstrap += [io.server_stmt, - "io.write('1'.encode('ascii'))", - "SlaveGateway(io=io, _startcount=2).serve()", - ] - source = "\n".join(bootstrap) - self._trace("sending gateway bootstrap code") - #open("/tmp/bootstrap.py", 'w').write(source) - repr_source = repr(source) + "\n" - io.write(repr_source.encode('ascii')) - s = io.read(1) - assert s == "1".encode('ascii') - - def _rinfo(self, update=False): - """ return some sys/env information from remote. """ - if update or not hasattr(self, '_cache_rinfo'): - ch = self.remote_exec(rinfo_source) - self._cache_rinfo = RInfo(**ch.receive()) - return self._cache_rinfo - - def remote_exec(self, source): - """ return channel object and connect it to a remote - execution thread where the given 'source' executes - and has the sister 'channel' object in its global - namespace. - """ - source = str(py.code.Source(source)) - channel = self.newchannel() - self._send(Message.CHANNEL_OPEN(channel.id, source)) - return channel - - def remote_init_threads(self, num=None): - """ start up to 'num' threads for subsequent - remote_exec() invocations to allow concurrent - execution. - """ - if hasattr(self, '_remotechannelthread'): - raise IOError("remote threads already running") - from py.__.thread import pool - source = py.code.Source(pool, """ - execpool = WorkerPool(maxthreads=%r) - gw = channel.gateway - while 1: - task = gw._execqueue.get() - if task is None: - gw._stopsend() - execpool.shutdown() - execpool.join() - raise gw._StopExecLoop - execpool.dispatch(gw.executetask, task) - """ % num) - self._remotechannelthread = self.remote_exec(source) - - def _remote_redirect(self, stdout=None, stderr=None): - """ return a handle representing a redirection of a remote - end's stdout to a local file object. with handle.close() - the redirection will be reverted. - """ - # XXX implement a remote_exec_in_globals(...) 
- # to send ThreadOut implementation over - clist = [] - for name, out in ('stdout', stdout), ('stderr', stderr): - if out: - outchannel = self.newchannel() - outchannel.setcallback(getattr(out, 'write', out)) - channel = self.remote_exec(""" - import sys - outchannel = channel.receive() - ThreadOut(sys, %r).setdefaultwriter(outchannel.send) - """ % name) - channel.send(outchannel) - clist.append(channel) - for c in clist: - c.waitclose() - class Handle: - def close(_): - for name, out in ('stdout', stdout), ('stderr', stderr): - if out: - c = self.remote_exec(""" - import sys - channel.gateway._ThreadOut(sys, %r).resetdefault() - """ % name) - c.waitclose() - return Handle() - - - -class RInfo: - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - def __repr__(self): - info = ", ".join(["%s=%s" % item - for item in self.__dict__.items()]) - return "" % info - -rinfo_source = """ -import sys, os -channel.send(dict( - executable = sys.executable, - version_info = tuple([sys.version_info[i] for i in range(5)]), - platform = sys.platform, - cwd = os.getcwd(), - pid = os.getpid(), -)) -""" - -class PopenCmdGateway(InitiatingGateway): - def __init__(self, args): - from subprocess import Popen, PIPE - self._popen = p = Popen(args, stdin=PIPE, stdout=PIPE) - io = Popen2IO(p.stdin, p.stdout) - super(PopenCmdGateway, self).__init__(io=io) - - def exit(self): - super(PopenCmdGateway, self).exit() - self._popen.poll() - -popen_bootstrapline = "import sys ; exec(eval(sys.stdin.readline()))" -class PopenGateway(PopenCmdGateway): - """ This Gateway provides interaction with a newly started - python subprocess. - """ - def __init__(self, python=None): - """ instantiate a gateway to a subprocess - started with the given 'python' executable. - """ - if not python: - python = sys.executable - args = [str(python), '-c', popen_bootstrapline] - super(PopenGateway, self).__init__(args) - - def _remote_bootstrap_gateway(self, io, extra=''): - # have the subprocess use the same PYTHONPATH and py lib - x = py.path.local(py.__file__).dirpath().dirpath() - ppath = os.environ.get('PYTHONPATH', '') - plist = [str(x)] + ppath.split(':') - s = "\n".join([extra, - "import sys ; sys.path[:0] = %r" % (plist,), - "import os ; os.environ['PYTHONPATH'] = %r" % ppath, - inspect.getsource(stdouterrin_setnull), - "stdouterrin_setnull()", - "" - ]) - super(PopenGateway, self)._remote_bootstrap_gateway(io, s) - -class SocketGateway(InitiatingGateway): - """ This Gateway provides interaction with a remote process - by connecting to a specified socket. On the remote - side you need to manually start a small script - (py/execnet/script/socketserver.py) that accepts - SocketGateway connections. - """ - def __init__(self, host, port): - """ instantiate a gateway to a process accessed - via a host/port specified socket. - """ - self.host = host = str(host) - self.port = port = int(port) - self.remoteaddress = '%s:%d' % (self.host, self.port) - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - sock.connect((host, port)) - except socket.gaierror: - raise HostNotFound(str(sys.exc_info()[1])) - io = SocketIO(sock) - super(SocketGateway, self).__init__(io=io) - - def new_remote(cls, gateway, hostport=None): - """ return a new (connected) socket gateway, instatiated - indirectly through the given 'gateway'. - """ - if hostport is None: - host, port = ('', 0) # XXX works on all platforms? 
- else: - host, port = hostport - mydir = py.path.local(__file__).dirpath() - socketserverbootstrap = py.code.Source( - mydir.join('script', 'socketserver.py').read('r'), """ - import socket - sock = bind_and_listen((%r, %r)) - port = sock.getsockname() - channel.send(port) - startserver(sock) - """ % (host, port) - ) - # execute the above socketserverbootstrap on the other side - channel = gateway.remote_exec(socketserverbootstrap) - (realhost, realport) = channel.receive() - #gateway._trace("new_remote received" - # "port=%r, hostname = %r" %(realport, hostname)) - return py.execnet.SocketGateway(host, realport) - new_remote = classmethod(new_remote) - -class HostNotFound(Exception): - pass - -class SshGateway(PopenCmdGateway): - """ This Gateway provides interaction with a remote Python process, - established via the 'ssh' command line binary. - The remote side needs to have a Python interpreter executable. - """ - - def __init__(self, sshaddress, remotepython=None, ssh_config=None): - """ instantiate a remote ssh process with the - given 'sshaddress' and remotepython version. - you may specify an ssh_config file. - """ - self.remoteaddress = sshaddress - if remotepython is None: - remotepython = "python" - args = ['ssh', '-C' ] - if ssh_config is not None: - args.extend(['-F', str(ssh_config)]) - remotecmd = '%s -c "%s"' %(remotepython, popen_bootstrapline) - args.extend([sshaddress, remotecmd]) - super(SshGateway, self).__init__(args) - - def _remote_bootstrap_gateway(self, io, s=""): - extra = "\n".join([ - str(py.code.Source(stdouterrin_setnull)), - "stdouterrin_setnull()", - s, - ]) - try: - super(SshGateway, self)._remote_bootstrap_gateway(io, extra) - except EOFError: - ret = self._popen.wait() - if ret == 255: - raise HostNotFound(self.remoteaddress) - -def stdouterrin_setnull(): - """ redirect file descriptors 0 and 1 (and possibly 2) to /dev/null. - note that this function may run remotely without py lib support. 
- """ - # complete confusion (this is independent from the sys.stdout - # and sys.stderr redirection that gateway.remote_exec() can do) - # note that we redirect fd 2 on win too, since for some reason that - # blocks there, while it works (sending to stderr if possible else - # ignoring) on *nix - import sys, os - if not hasattr(os, 'dup'): # jython - return - try: - devnull = os.devnull - except AttributeError: - if os.name == 'nt': - devnull = 'NUL' - else: - devnull = '/dev/null' - # stdin - sys.stdin = os.fdopen(os.dup(0), 'r', 1) - fd = os.open(devnull, os.O_RDONLY) - os.dup2(fd, 0) - os.close(fd) - - # stdout - sys.stdout = os.fdopen(os.dup(1), 'w', 1) - fd = os.open(devnull, os.O_WRONLY) - os.dup2(fd, 1) - - # stderr for win32 - if os.name == 'nt': - sys.stderr = os.fdopen(os.dup(2), 'w', 1) - os.dup2(fd, 2) - os.close(fd) --- a/py/execnet/script/loop_socketserver.py +++ /dev/null @@ -1,14 +0,0 @@ - -import os, sys -import subprocess - -if __name__ == '__main__': - directory = os.path.dirname(os.path.abspath(sys.argv[0])) - script = os.path.join(directory, 'socketserver.py') - while 1: - cmdlist = ["python", script] - cmdlist.extend(sys.argv[1:]) - text = "starting subcommand: " + " ".join(cmdlist) - print(text) - process = subprocess.Popen(cmdlist) - process.wait() --- a/doc/test/funcargs.txt +++ b/doc/test/funcargs.txt @@ -165,7 +165,7 @@ and to offer a new mysetup method: host = self.config.option.ssh if host is None: py.test.skip("specify ssh host with --ssh") - return py.execnet.SshGateway(host) + return execnet.SshGateway(host) Now any test function can use the ``mysetup.getsshconnection()`` method like this: --- a/bin-for-dist/gensetup.py +++ b/bin-for-dist/gensetup.py @@ -3,7 +3,7 @@ import sys sys.path.insert(0, sys.argv[1]) import py -toolpath = py.magic.autopath() +toolpath = py.path.local(__file__) binpath = py.path.local(py.__file__).dirpath('bin') def error(msg): --- a/testing/execnet/test_multi.py +++ /dev/null @@ -1,58 +0,0 @@ -""" - tests for - - multi channels and multi gateways - -""" - -import py - -class TestMultiChannelAndGateway: - def test_multichannel_receive_each(self): - class pseudochannel: - def receive(self): - return 12 - - pc1 = pseudochannel() - pc2 = pseudochannel() - multichannel = py.execnet.MultiChannel([pc1, pc2]) - l = multichannel.receive_each(withchannel=True) - assert len(l) == 2 - assert l == [(pc1, 12), (pc2, 12)] - l = multichannel.receive_each(withchannel=False) - assert l == [12,12] - - def test_multichannel_send_each(self): - l = [py.execnet.PopenGateway() for x in range(2)] - gm = py.execnet.MultiGateway(l) - mc = gm.remote_exec(""" - import os - channel.send(channel.receive() + 1) - """) - mc.send_each(41) - l = mc.receive_each() - assert l == [42,42] - - def test_multichannel_receive_queue_for_two_subprocesses(self): - l = [py.execnet.PopenGateway() for x in range(2)] - gm = py.execnet.MultiGateway(l) - mc = gm.remote_exec(""" - import os - channel.send(os.getpid()) - """) - queue = mc.make_receive_queue() - ch, item = queue.get(timeout=10) - ch2, item2 = queue.get(timeout=10) - assert ch != ch2 - assert ch.gateway != ch2.gateway - assert item != item2 - mc.waitclose() - - def test_multichannel_waitclose(self): - l = [] - class pseudochannel: - def waitclose(self): - l.append(0) - multichannel = py.execnet.MultiChannel([pseudochannel(), pseudochannel()]) - multichannel.waitclose() - assert len(l) == 2 - --- a/testing/execnet/test_xspec.py +++ /dev/null @@ -1,151 +0,0 @@ -import py - -XSpec = py.execnet.XSpec - -class TestXSpec: - 
def test_norm_attributes(self): - spec = XSpec("socket=192.168.102.2:8888//python=c:/this/python2.5//chdir=d:\hello") - assert spec.socket == "192.168.102.2:8888" - assert spec.python == "c:/this/python2.5" - assert spec.chdir == "d:\hello" - assert spec.nice is None - assert not hasattr(spec, '_xyz') - - py.test.raises(AttributeError, "spec._hello") - - spec = XSpec("socket=192.168.102.2:8888//python=python2.5//nice=3") - assert spec.socket == "192.168.102.2:8888" - assert spec.python == "python2.5" - assert spec.chdir is None - assert spec.nice == "3" - - spec = XSpec("ssh=user at host//chdir=/hello/this//python=/usr/bin/python2.5") - assert spec.ssh == "user at host" - assert spec.python == "/usr/bin/python2.5" - assert spec.chdir == "/hello/this" - - spec = XSpec("popen") - assert spec.popen == True - - def test__samefilesystem(self): - assert XSpec("popen")._samefilesystem() - assert XSpec("popen//python=123")._samefilesystem() - assert not XSpec("popen//chdir=hello")._samefilesystem() - - def test__spec_spec(self): - for x in ("popen", "popen//python=this"): - assert XSpec(x)._spec == x - - def test_samekeyword_twice_raises(self): - py.test.raises(ValueError, "XSpec('popen//popen')") - py.test.raises(ValueError, "XSpec('popen//popen=123')") - - def test_unknown_keys_allowed(self): - xspec = XSpec("hello=3") - assert xspec.hello == '3' - - def test_repr_and_string(self): - for x in ("popen", "popen//python=this"): - assert repr(XSpec(x)).find("popen") != -1 - assert str(XSpec(x)) == x - - def test_hash_equality(self): - assert XSpec("popen") == XSpec("popen") - assert hash(XSpec("popen")) == hash(XSpec("popen")) - assert XSpec("popen//python=123") != XSpec("popen") - assert hash(XSpec("socket=hello:8080")) != hash(XSpec("popen")) - -class TestMakegateway: - def test_no_type(self): - py.test.raises(ValueError, "py.execnet.makegateway('hello')") - - def test_popen(self): - gw = py.execnet.makegateway("popen") - assert gw.spec.python == None - rinfo = gw._rinfo() - assert rinfo.executable == py.std.sys.executable - assert rinfo.cwd == py.std.os.getcwd() - assert rinfo.version_info == py.std.sys.version_info - - def test_popen_nice(self): - gw = py.execnet.makegateway("popen//nice=5") - remotenice = gw.remote_exec(""" - import os - if hasattr(os, 'nice'): - channel.send(os.nice(0)) - else: - channel.send(None) - """).receive() - if remotenice is not None: - assert remotenice == 5 - - def test_popen_explicit(self): - gw = py.execnet.makegateway("popen//python=%s" % py.std.sys.executable) - assert gw.spec.python == py.std.sys.executable - rinfo = gw._rinfo() - assert rinfo.executable == py.std.sys.executable - assert rinfo.cwd == py.std.os.getcwd() - assert rinfo.version_info == py.std.sys.version_info - - def test_popen_cpython25(self): - for trypath in ('python2.5', r'C:\Python25\python.exe'): - cpython25 = py.path.local.sysfind(trypath) - if cpython25 is not None: - cpython25 = cpython25.realpath() - break - else: - py.test.skip("cpython2.5 not found") - gw = py.execnet.makegateway("popen//python=%s" % cpython25) - rinfo = gw._rinfo() - if py.std.sys.platform != "darwin": # it's confusing there - assert rinfo.executable == cpython25 - assert rinfo.cwd == py.std.os.getcwd() - assert rinfo.version_info[:2] == (2,5) - - def test_popen_cpython26(self): - for trypath in ('python2.6', r'C:\Python26\python.exe'): - cpython26 = py.path.local.sysfind(trypath) - if cpython26 is not None: - break - else: - py.test.skip("cpython2.6 not found") - gw = py.execnet.makegateway("popen//python=%s" % 
cpython26) - rinfo = gw._rinfo() - assert rinfo.executable == cpython26 - assert rinfo.cwd == py.std.os.getcwd() - assert rinfo.version_info[:2] == (2,6) - - def test_popen_chdir_absolute(self, testdir): - gw = py.execnet.makegateway("popen//chdir=%s" % testdir.tmpdir) - rinfo = gw._rinfo() - assert rinfo.cwd == str(testdir.tmpdir.realpath()) - - def test_popen_chdir_newsub(self, testdir): - testdir.chdir() - gw = py.execnet.makegateway("popen//chdir=hello") - rinfo = gw._rinfo() - assert rinfo.cwd == str(testdir.tmpdir.join("hello").realpath()) - - def test_ssh(self, specssh): - sshhost = specssh.ssh - gw = py.execnet.makegateway("ssh=%s" % sshhost) - rinfo = gw._rinfo() - gw2 = py.execnet.SshGateway(sshhost) - rinfo2 = gw2._rinfo() - assert rinfo.executable == rinfo2.executable - assert rinfo.cwd == rinfo2.cwd - assert rinfo.version_info == rinfo2.version_info - - def test_socket(self, specsocket): - gw = py.execnet.makegateway("socket=%s" % specsocket.socket) - rinfo = gw._rinfo() - assert rinfo.executable - assert rinfo.cwd - assert rinfo.version_info - # we cannot instantiate a second gateway - - #gw2 = py.execnet.SocketGateway(*specsocket.socket.split(":")) - #rinfo2 = gw2._rinfo() - #assert rinfo.executable == rinfo2.executable - #assert rinfo.cwd == rinfo2.cwd - #assert rinfo.version_info == rinfo2.version_info --- a/example/execnet/svn-sync-repo.py +++ b/example/execnet/svn-sync-repo.py @@ -82,7 +82,7 @@ def get_svn_youngest(repo): return int(rev) def getgateway(host, keyfile=None): - return py.execnet.SshGateway(host, identity=keyfile) + return execnet.SshGateway(host, identity=keyfile) if __name__ == '__main__': if len(sys.argv) < 3: --- a/py/test/config.py +++ b/py/test/config.py @@ -252,7 +252,8 @@ class Config(object): xspeclist.extend([xspec[i+1:]] * num) if not xspeclist: raise self.Error("MISSING test execution (tx) nodes: please specify --tx") - return [py.execnet.XSpec(x) for x in xspeclist] + import execnet + return [execnet.XSpec(x) for x in xspeclist] def getrsyncdirs(self): config = self --- a/testing/execnet/test_serializer.py +++ /dev/null @@ -1,179 +0,0 @@ -# -*- coding: utf-8 -*- -import sys -import os -import tempfile -import subprocess -import py -from py.__.execnet import serializer - - -def _find_version(suffix=""): - name = "python" + suffix - executable = py.path.local.sysfind(name) - if executable is None: - py.test.skip("can't find a %r executable" % (name,)) - return executable - -def setup_module(mod): - mod.TEMPDIR = py.path.local(tempfile.mkdtemp()) - if sys.version_info > (3, 0): - mod._py3_wrapper = PythonWrapper(py.path.local(sys.executable)) - mod._py2_wrapper = PythonWrapper(_find_version()) - else: - mod._py3_wrapper = PythonWrapper(_find_version("3")) - mod._py2_wrapper = PythonWrapper(py.path.local(sys.executable)) - mod._old_pypath = os.environ.get("PYTHONPATH") - pylib = str(py.path.local(py.__file__).dirpath().join("..")) - os.environ["PYTHONPATH"] = pylib - -def teardown_module(mod): - TEMPDIR.remove(True) - if _old_pypath is not None: - os.environ["PYTHONPATH"] = _old_pypath - - -class PythonWrapper(object): - - def __init__(self, executable): - self.executable = executable - - def dump(self, obj_rep): - script_file = TEMPDIR.join("dump.py") - script_file.write(""" -from py.__.execnet import serializer -import sys -if sys.version_info > (3, 0): # Need binary output - sys.stdout = sys.stdout.detach() -saver = serializer.Serializer(sys.stdout) -saver.save(%s)""" % (obj_rep,)) - return self.executable.sysexec(script_file) - - def load(self, 
data, option_args=""): - script_file = TEMPDIR.join("load.py") - script_file.write(r""" -from py.__.execnet import serializer -import sys -if sys.version_info > (3, 0): - sys.stdin = sys.stdin.detach() -options = serializer.UnserializationOptions(%s) -loader = serializer.Unserializer(sys.stdin, options) -obj = loader.load() -sys.stdout.write(type(obj).__name__ + "\n") -sys.stdout.write(repr(obj))""" % (option_args,)) - popen = subprocess.Popen([str(self.executable), str(script_file)], - stdin=subprocess.PIPE, - stderr=subprocess.PIPE, - stdout=subprocess.PIPE) - stdout, stderr = popen.communicate(data.encode("latin-1")) - ret = popen.returncode - if ret: - raise py.process.cmdexec.Error(ret, ret, str(self.executable), - stdout, stderr) - return [s.decode("ascii") for s in stdout.splitlines()] - - def __repr__(self): - return "" % (self.executable,) - - -def pytest_funcarg__py2(request): - return _py2_wrapper - -def pytest_funcarg__py3(request): - return _py3_wrapper - -def pytest_funcarg__dump(request): - py_dump = request.getfuncargvalue(request.param[0]) - return py_dump.dump - -def pytest_funcarg__load(request): - py_dump = request.getfuncargvalue(request.param[1]) - return py_dump.load - -def pytest_generate_tests(metafunc): - if 'dump' in metafunc.funcargnames and 'load' in metafunc.funcargnames: - pys = 'py2', 'py3' - for dump in pys: - for load in pys: - param = (dump, load) - conversion = '%s to %s'%param - if 'repr' not in metafunc.funcargnames: - metafunc.addcall(id=conversion, param=param) - else: - for tp, repr in simple_tests.items(): - metafunc.addcall( - id='%s:%s'%(tp, conversion), - param=param, - funcargs={'tp_name':tp, 'repr':repr}, - ) - - -simple_tests = { -# type: expected before/after repr - 'int': '4', - 'float':'3.25', - 'list': '[1, 2, 3]', - 'tuple': '(1, 2, 3)', - 'dict': '{6: 2, (1, 2, 3): 32}', -} - -def test_simple(tp_name, repr, dump, load): - p = dump(repr) - tp , v = load(p) - assert tp == tp_name - assert v == repr - - - at py.test.mark.xfail -# I'm not sure if we need the complexity. -def test_recursive_list(py2, py3): - l = [1, 2, 3] - l.append(l) - p = py2.dump(l) - tp, rep = py2.load(l) - assert tp == "list" - -def test_bigint_should_fail(): - py.test.raises(serializer.SerializationError, - serializer.Serializer(py.io.BytesIO()).save, - 123456678900) - -def test_bytes(py2, py3): - p = py3.dump("b'hi'") - tp, v = py2.load(p) - assert tp == "str" - assert v == "'hi'" - tp, v = py3.load(p) - assert tp == "bytes" - assert v == "b'hi'" - -def test_string(py2, py3): - p = py2.dump("'xyz'") - tp, s = py2.load(p) - assert tp == "str" - assert s == "'xyz'" - tp, s = py3.load(p) - assert tp == "bytes" - assert s == "b'xyz'" - tp, s = py3.load(p, "True") - assert tp == "str" - assert s == "'xyz'" - p = py3.dump("'xyz'") - tp, s = py2.load(p, True) - assert tp == "str" - assert s == "'xyz'" - -def test_unicode(py2, py3): - p = py2.dump("u'hi'") - tp, s = py2.load(p) - assert tp == "unicode" - assert s == "u'hi'" - tp, s = py3.load(p) - assert tp == "str" - assert s == "'hi'" - p = py3.dump("'hi'") - tp, s = py3.load(p) - assert tp == "str" - assert s == "'hi'" - tp, s = py2.load(p) - assert tp == "unicode" - assert s == "u'hi'" --- a/py/execnet/rsync.py +++ /dev/null @@ -1,201 +0,0 @@ -""" -1:N rsync implemenation on top of execnet. 
- -(c) 2006-2009, Armin Rigo, Holger Krekel, Maciej Fijalkowski -""" -import py, os, stat - -md5 = py.builtin._tryimport('hashlib', 'md5').md5 -Queue = py.builtin._tryimport('queue', 'Queue').Queue - -class RSync(object): - """ This class allows to send a directory structure (recursively) - to one or multiple remote filesystems. - - There is limited support for symlinks, which means that symlinks - pointing to the sourcetree will be send "as is" while external - symlinks will be just copied (regardless of existance of such - a path on remote side). - """ - def __init__(self, sourcedir, callback=None, verbose=True): - self._sourcedir = str(sourcedir) - self._verbose = verbose - assert callback is None or py.builtin.callable(callback) - self._callback = callback - self._channels = {} - self._receivequeue = Queue() - self._links = [] - - def filter(self, path): - return True - - def _end_of_channel(self, channel): - if channel in self._channels: - # too early! we must have got an error - channel.waitclose() - # or else we raise one - raise IOError('connection unexpectedly closed: %s ' % ( - channel.gateway,)) - - def _process_link(self, channel): - for link in self._links: - channel.send(link) - # completion marker, this host is done - channel.send(42) - - def _done(self, channel): - """ Call all callbacks - """ - finishedcallback = self._channels.pop(channel) - if finishedcallback: - finishedcallback() - - def _list_done(self, channel): - # sum up all to send - if self._callback: - s = sum([self._paths[i] for i in self._to_send[channel]]) - self._callback("list", s, channel) - - def _send_item(self, channel, data): - """ Send one item - """ - modified_rel_path, checksum = data - modifiedpath = os.path.join(self._sourcedir, *modified_rel_path) - try: - f = open(modifiedpath, 'rb') - data = f.read() - except IOError: - data = None - - # provide info to progress callback function - modified_rel_path = "/".join(modified_rel_path) - if data is not None: - self._paths[modified_rel_path] = len(data) - else: - self._paths[modified_rel_path] = 0 - if channel not in self._to_send: - self._to_send[channel] = [] - self._to_send[channel].append(modified_rel_path) - #print "sending", modified_rel_path, data and len(data) or 0, checksum - - if data is not None: - f.close() - if checksum is not None and checksum == md5(data).digest(): - data = None # not really modified - else: - self._report_send_file(channel.gateway, modified_rel_path) - channel.send(data) - - def _report_send_file(self, gateway, modified_rel_path): - if self._verbose: - print("%s <= %s" %(gateway, modified_rel_path)) - - def send(self, raises=True): - """ Sends a sourcedir to all added targets. 
Flag indicates - whether to raise an error or return in case of lack of - targets - """ - if not self._channels: - if raises: - raise IOError("no targets available, maybe you " - "are trying call send() twice?") - return - # normalize a trailing '/' away - self._sourcedir = os.path.dirname(os.path.join(self._sourcedir, 'x')) - # send directory structure and file timestamps/sizes - self._send_directory_structure(self._sourcedir) - - # paths and to_send are only used for doing - # progress-related callbacks - self._paths = {} - self._to_send = {} - - # send modified file to clients - while self._channels: - channel, req = self._receivequeue.get() - if req is None: - self._end_of_channel(channel) - else: - command, data = req - if command == "links": - self._process_link(channel) - elif command == "done": - self._done(channel) - elif command == "ack": - if self._callback: - self._callback("ack", self._paths[data], channel) - elif command == "list_done": - self._list_done(channel) - elif command == "send": - self._send_item(channel, data) - del data - else: - assert "Unknown command %s" % command - - def add_target(self, gateway, destdir, - finishedcallback=None, **options): - """ Adds a remote target specified via a 'gateway' - and a remote destination directory. - """ - assert finishedcallback is None or py.builtin.callable(finishedcallback) - for name in options: - assert name in ('delete',) - def itemcallback(req): - self._receivequeue.put((channel, req)) - channel = gateway.remote_exec(REMOTE_SOURCE) - channel.setcallback(itemcallback, endmarker = None) - channel.send((str(destdir), options)) - self._channels[channel] = finishedcallback - - def _broadcast(self, msg): - for channel in self._channels: - channel.send(msg) - - def _send_link(self, basename, linkpoint): - self._links.append(("link", basename, linkpoint)) - - def _send_directory(self, path): - # dir: send a list of entries - names = [] - subpaths = [] - for name in os.listdir(path): - p = os.path.join(path, name) - if self.filter(p): - names.append(name) - subpaths.append(p) - self._broadcast(names) - for p in subpaths: - self._send_directory_structure(p) - - def _send_link_structure(self, path): - linkpoint = os.readlink(path) - basename = path[len(self._sourcedir) + 1:] - if not linkpoint.startswith(os.sep): - # relative link, just send it - # XXX: do sth with ../ links - self._send_link(basename, linkpoint) - elif linkpoint.startswith(self._sourcedir): - self._send_link(basename, linkpoint[len(self._sourcedir) + 1:]) - else: - self._send_link(basename, linkpoint) - self._broadcast(None) - - def _send_directory_structure(self, path): - try: - st = os.lstat(path) - except OSError: - self._broadcast((0, 0)) - return - if stat.S_ISREG(st.st_mode): - # regular file: send a timestamp/size pair - self._broadcast((st.st_mtime, st.st_size)) - elif stat.S_ISDIR(st.st_mode): - self._send_directory(path) - elif stat.S_ISLNK(st.st_mode): - self._send_link_structure(path) - else: - raise ValueError("cannot sync %r" % (path,)) - -REMOTE_SOURCE = py.path.local(__file__).dirpath().\ - join('rsync_remote.py').open().read() + "\nf()" - --- a/py/test/dist/mypickle.py +++ b/py/test/dist/mypickle.py @@ -13,7 +13,7 @@ """ import py -from py.__.execnet.gateway_base import Channel +from execnet.gateway_base import Channel import sys, os, struct #debug = open("log-mypickle-%d" % os.getpid(), 'w') --- a/py/execnet/xspec.py +++ /dev/null @@ -1,79 +0,0 @@ -""" -(c) 2008-2009, holger krekel -""" -import py - -class XSpec: - """ Execution Specification: 
key1=value1//key2=value2 ... - * keys need to be unique within the specification scope - * neither key nor value are allowed to contain "//" - * keys are not allowed to contain "=" - * keys are not allowed to start with underscore - * if no "=value" is given, assume a boolean True value - """ - # XXX allow customization, for only allow specific key names - popen = ssh = socket = python = chdir = nice = None - - def __init__(self, string): - self._spec = string - for keyvalue in string.split("//"): - i = keyvalue.find("=") - if i == -1: - key, value = keyvalue, True - else: - key, value = keyvalue[:i], keyvalue[i+1:] - if key[0] == "_": - raise AttributeError("%r not a valid XSpec key" % key) - if key in self.__dict__: - raise ValueError("duplicate key: %r in %r" %(key, string)) - setattr(self, key, value) - - def __getattr__(self, name): - if name[0] == "_": - raise AttributeError(name) - return None - - def __repr__(self): - return "" %(self._spec,) - def __str__(self): - return self._spec - - def __hash__(self): - return hash(self._spec) - def __eq__(self, other): - return self._spec == getattr(other, '_spec', None) - def __ne__(self, other): - return self._spec != getattr(other, '_spec', None) - - def _samefilesystem(self): - return bool(self.popen and not self.chdir) - -def makegateway(spec): - if not isinstance(spec, XSpec): - spec = XSpec(spec) - if spec.popen: - gw = py.execnet.PopenGateway(python=spec.python) - elif spec.ssh: - gw = py.execnet.SshGateway(spec.ssh, remotepython=spec.python) - elif spec.socket: - assert not spec.python, "socket: specifying python executables not supported" - hostport = spec.socket.split(":") - gw = py.execnet.SocketGateway(*hostport) - else: - raise ValueError("no gateway type found for %r" % (spec._spec,)) - gw.spec = spec - if spec.chdir or spec.nice: - channel = gw.remote_exec(""" - import os - path, nice = channel.receive() - if path: - if not os.path.exists(path): - os.mkdir(path) - os.chdir(path) - if nice and hasattr(os, 'nice'): - os.nice(nice) - """) - nice = spec.nice and int(spec.nice) or 0 - channel.send((spec.chdir, nice)) - channel.waitclose() - return gw --- a/py/execnet/multi.py +++ /dev/null @@ -1,71 +0,0 @@ -""" -Support for working with multiple channels and gateways - -(c) 2008-2009, Holger Krekel and others -""" -import py -try: - import queue -except ImportError: - import Queue as queue - -NO_ENDMARKER_WANTED = object() - -class MultiGateway: - def __init__(self, gateways): - self.gateways = gateways - def remote_exec(self, source): - channels = [] - for gw in self.gateways: - channels.append(gw.remote_exec(source)) - return MultiChannel(channels) - def exit(self): - for gw in self.gateways: - gw.exit() - -class MultiChannel: - def __init__(self, channels): - self._channels = channels - - def send_each(self, item): - for ch in self._channels: - ch.send(item) - - def receive_each(self, withchannel=False): - assert not hasattr(self, '_queue') - l = [] - for ch in self._channels: - obj = ch.receive() - if withchannel: - l.append((ch, obj)) - else: - l.append(obj) - return l - - def make_receive_queue(self, endmarker=NO_ENDMARKER_WANTED): - try: - return self._queue - except AttributeError: - self._queue = queue.Queue() - for ch in self._channels: - def putreceived(obj, channel=ch): - self._queue.put((channel, obj)) - if endmarker is NO_ENDMARKER_WANTED: - ch.setcallback(putreceived) - else: - ch.setcallback(putreceived, endmarker=endmarker) - return self._queue - - - def waitclose(self): - first = None - for ch in self._channels: - 
try: - ch.waitclose() - except ch.RemoteError: - if first is None: - first = py.std.sys.exc_info() - if first: - py.builtin._reraise(first[0], first[1], first[2]) - - --- a/testing/pytest/dist/test_gwmanage.py +++ b/testing/pytest/dist/test_gwmanage.py @@ -9,6 +9,7 @@ import py import os from py.__.test.dist.gwmanage import GatewayManager, HostRSync from py.__.test.plugin import hookspec +import execnet def pytest_funcarg__hookrecorder(request): _pytest = request.getfuncargvalue('_pytest') @@ -35,7 +36,7 @@ class TestGatewayManagerPopen: hm = GatewayManager(["popen"] * 2, hook) hm.makegateways() call = hookrecorder.popcall("pytest_gwmanage_newgateway") - assert call.gateway.spec == py.execnet.XSpec("popen") + assert call.gateway.spec == execnet.XSpec("popen") assert call.gateway.id == "[1]" assert call.platinfo.executable == call.gateway._rinfo().executable call = hookrecorder.popcall("pytest_gwmanage_newgateway") @@ -149,7 +150,7 @@ class TestHRSync: def test_hrsync_one_host(self, mysetup): source, dest = mysetup.source, mysetup.dest - gw = py.execnet.makegateway("popen//chdir=%s" % dest) + gw = execnet.makegateway("popen//chdir=%s" % dest) finished = [] rsync = HostRSync(source) rsync.add_target_host(gw, finished=lambda: finished.append(1)) --- a/py/path/gateway/channeltest2.py +++ b/py/path/gateway/channeltest2.py @@ -11,8 +11,8 @@ channel.send(srv.p2c(py.path.local("/tmp ''' -#gw = py.execnet.SshGateway('codespeak.net') -gw = py.execnet.PopenGateway() +#gw = execnet.SshGateway('codespeak.net') +gw = execnet.PopenGateway() gw.remote_init_threads(5) c = gw.remote_exec(SRC, stdout=py.std.sys.stdout, stderr=py.std.sys.stderr) subchannel = gw._channelfactory.new() --- a/contrib/svn-sync-repo.py +++ b/contrib/svn-sync-repo.py @@ -3,7 +3,7 @@ """ small utility for hot-syncing a svn repository through ssh. -uses py.execnet. +uses execnet. """ @@ -105,7 +105,7 @@ def get_svn_youngest(repo): return int(rev) def getgateway(host, keyfile=None): - return py.execnet.SshGateway(host, identity=keyfile) + return execnet.SshGateway(host, identity=keyfile) if __name__ == '__main__': if len(sys.argv) < 3: --- a/testing/pytest/plugin/test_pytest_terminal.py +++ b/testing/pytest/plugin/test_pytest_terminal.py @@ -105,6 +105,7 @@ class TestTerminal: ]) def test_gwmanage_events(self, testdir, linecomp): + execnet = py.test.importorskip("execnet") modcol = testdir.getmodulecol(""" def test_one(): pass @@ -113,10 +114,10 @@ class TestTerminal: rep = TerminalReporter(modcol.config, file=linecomp.stringio) class gw1: id = "X1" - spec = py.execnet.XSpec("popen") + spec = execnet.XSpec("popen") class gw2: id = "X2" - spec = py.execnet.XSpec("popen") + spec = execnet.XSpec("popen") class rinfo: version_info = (2, 5, 1, 'final', 0) executable = "hello" --- a/testing/execnet/test_gateway.py +++ /dev/null @@ -1,545 +0,0 @@ -""" -mostly functional tests of gateways. 
-""" -import os, sys, time -import py -from py.__.execnet import gateway_base, gateway -queue = py.builtin._tryimport('queue', 'Queue') - -TESTTIMEOUT = 10.0 # seconds - -class TestBasicRemoteExecution: - def test_correct_setup(self, gw): - assert gw._receiverthread.isAlive() - - def test_repr_doesnt_crash(self, gw): - assert isinstance(repr(gw), str) - - def test_attribute__name__(self, gw): - channel = gw.remote_exec("channel.send(__name__)") - name = channel.receive() - assert name == "__channelexec__" - - def test_correct_setup_no_py(self, gw): - channel = gw.remote_exec(""" - import sys - channel.send(list(sys.modules)) - """) - remotemodules = channel.receive() - assert 'py' not in remotemodules, ( - "py should not be imported on remote side") - - def test_remote_exec_waitclose(self, gw): - channel = gw.remote_exec('pass') - channel.waitclose(TESTTIMEOUT) - - def test_remote_exec_waitclose_2(self, gw): - channel = gw.remote_exec('def gccycle(): pass') - channel.waitclose(TESTTIMEOUT) - - def test_remote_exec_waitclose_noarg(self, gw): - channel = gw.remote_exec('pass') - channel.waitclose() - - def test_remote_exec_error_after_close(self, gw): - channel = gw.remote_exec('pass') - channel.waitclose(TESTTIMEOUT) - py.test.raises(IOError, channel.send, 0) - - def test_remote_exec_channel_anonymous(self, gw): - channel = gw.remote_exec(''' - obj = channel.receive() - channel.send(obj) - ''') - channel.send(42) - result = channel.receive() - assert result == 42 - -class TestChannelBasicBehaviour: - def test_channel_close_and_then_receive_error(self, gw): - channel = gw.remote_exec('raise ValueError') - py.test.raises(channel.RemoteError, channel.receive) - - def test_channel_finish_and_then_EOFError(self, gw): - channel = gw.remote_exec('channel.send(42)') - x = channel.receive() - assert x == 42 - py.test.raises(EOFError, channel.receive) - py.test.raises(EOFError, channel.receive) - py.test.raises(EOFError, channel.receive) - - def test_channel_close_and_then_receive_error_multiple(self, gw): - channel = gw.remote_exec('channel.send(42) ; raise ValueError') - x = channel.receive() - assert x == 42 - py.test.raises(channel.RemoteError, channel.receive) - - def test_channel__local_close(self, gw): - channel = gw._channelfactory.new() - gw._channelfactory._local_close(channel.id) - channel.waitclose(0.1) - - def test_channel__local_close_error(self, gw): - channel = gw._channelfactory.new() - gw._channelfactory._local_close(channel.id, - channel.RemoteError("error")) - py.test.raises(channel.RemoteError, channel.waitclose, 0.01) - - def test_channel_error_reporting(self, gw): - channel = gw.remote_exec('def foo():\n return foobar()\nfoo()\n') - try: - channel.receive() - except channel.RemoteError: - e = sys.exc_info()[1] - assert str(e).startswith('Traceback (most recent call last):') - assert str(e).find('NameError: global name \'foobar\' ' - 'is not defined') > -1 - else: - py.test.fail('No exception raised') - - def test_channel_syntax_error(self, gw): - # missing colon - channel = gw.remote_exec('def foo()\n return 1\nfoo()\n') - try: - channel.receive() - except channel.RemoteError: - e = sys.exc_info()[1] - assert str(e).startswith('Traceback (most recent call last):') - assert str(e).find('SyntaxError') > -1 - - def test_channel_iter(self, gw): - channel = gw.remote_exec(""" - for x in range(3): - channel.send(x) - """) - l = list(channel) - assert l == [0, 1, 2] - - def test_channel_passing_over_channel(self, gw): - channel = gw.remote_exec(''' - c = channel.gateway.newchannel() - 
channel.send(c) - c.send(42) - ''') - c = channel.receive() - x = c.receive() - assert x == 42 - - # check that the both sides previous channels are really gone - channel.waitclose(TESTTIMEOUT) - #assert c.id not in gw._channelfactory - newchan = gw.remote_exec(''' - assert %d not in channel.gateway._channelfactory._channels - ''' % (channel.id)) - newchan.waitclose(TESTTIMEOUT) - assert channel.id not in gw._channelfactory._channels - - def test_channel_receiver_callback(self, gw): - l = [] - #channel = gw.newchannel(receiver=l.append) - channel = gw.remote_exec(source=''' - channel.send(42) - channel.send(13) - channel.send(channel.gateway.newchannel()) - ''') - channel.setcallback(callback=l.append) - py.test.raises(IOError, channel.receive) - channel.waitclose(TESTTIMEOUT) - assert len(l) == 3 - assert l[:2] == [42,13] - assert isinstance(l[2], channel.__class__) - - def test_channel_callback_after_receive(self, gw): - l = [] - channel = gw.remote_exec(source=''' - channel.send(42) - channel.send(13) - channel.send(channel.gateway.newchannel()) - ''') - x = channel.receive() - assert x == 42 - channel.setcallback(callback=l.append) - py.test.raises(IOError, channel.receive) - channel.waitclose(TESTTIMEOUT) - assert len(l) == 2 - assert l[0] == 13 - assert isinstance(l[1], channel.__class__) - - def test_waiting_for_callbacks(self, gw): - l = [] - def callback(msg): - import time; time.sleep(0.2) - l.append(msg) - channel = gw.remote_exec(source=''' - channel.send(42) - ''') - channel.setcallback(callback) - channel.waitclose(TESTTIMEOUT) - assert l == [42] - - def test_channel_callback_stays_active(self, gw): - self.check_channel_callback_stays_active(gw, earlyfree=True) - - def check_channel_callback_stays_active(self, gw, earlyfree=True): - # with 'earlyfree==True', this tests the "sendonly" channel state. 
- l = [] - channel = gw.remote_exec(source=''' - try: - import thread - except ImportError: - import _thread as thread - import time - def producer(subchannel): - for i in range(5): - time.sleep(0.15) - subchannel.send(i*100) - channel2 = channel.receive() - thread.start_new_thread(producer, (channel2,)) - del channel2 - ''') - subchannel = gw.newchannel() - subchannel.setcallback(l.append) - channel.send(subchannel) - if earlyfree: - subchannel = None - counter = 100 - while len(l) < 5: - if subchannel and subchannel.isclosed(): - break - counter -= 1 - print(counter) - if not counter: - py.test.fail("timed out waiting for the answer[%d]" % len(l)) - time.sleep(0.04) # busy-wait - assert l == [0, 100, 200, 300, 400] - return subchannel - - def test_channel_callback_remote_freed(self, gw): - channel = self.check_channel_callback_stays_active(gw, earlyfree=False) - # freed automatically at the end of producer() - channel.waitclose(TESTTIMEOUT) - - def test_channel_endmarker_callback(self, gw): - l = [] - channel = gw.remote_exec(source=''' - channel.send(42) - channel.send(13) - channel.send(channel.gateway.newchannel()) - ''') - channel.setcallback(l.append, 999) - py.test.raises(IOError, channel.receive) - channel.waitclose(TESTTIMEOUT) - assert len(l) == 4 - assert l[:2] == [42,13] - assert isinstance(l[2], channel.__class__) - assert l[3] == 999 - - def test_channel_endmarker_callback_error(self, gw): - q = queue.Queue() - channel = gw.remote_exec(source=''' - raise ValueError() - ''') - channel.setcallback(q.put, endmarker=999) - val = q.get(TESTTIMEOUT) - assert val == 999 - err = channel._getremoteerror() - assert err - assert str(err).find("ValueError") != -1 - - @py.test.mark.xfail - def test_remote_redirect_stdout(self, gw): - out = py.io.TextIO() - handle = gw._remote_redirect(stdout=out) - c = gw.remote_exec("print 42") - c.waitclose(TESTTIMEOUT) - handle.close() - s = out.getvalue() - assert s.strip() == "42" - - @py.test.mark.xfail - def test_remote_exec_redirect_multi(self, gw): - num = 3 - l = [[] for x in range(num)] - channels = [gw.remote_exec("print %d" % i, - stdout=l[i].append) - for i in range(num)] - for x in channels: - x.waitclose(TESTTIMEOUT) - - for i in range(num): - subl = l[i] - assert subl - s = subl[0] - assert s.strip() == str(i) - -class TestChannelFile: - def test_channel_file_write(self, gw): - channel = gw.remote_exec(""" - f = channel.makefile() - f.write("hello world\\n") - f.close() - channel.send(42) - """) - first = channel.receive() - assert first.strip() == 'hello world' - second = channel.receive() - assert second == 42 - - def test_channel_file_write_error(self, gw): - channel = gw.remote_exec("pass") - f = channel.makefile() - channel.waitclose(TESTTIMEOUT) - py.test.raises(IOError, f.write, 'hello') - - def test_channel_file_proxyclose(self, gw): - channel = gw.remote_exec(""" - f = channel.makefile(proxyclose=True) - f.write("hello world") - f.close() - channel.send(42) - """) - first = channel.receive() - assert first.strip() == 'hello world' - py.test.raises(EOFError, channel.receive) - - def test_channel_file_read(self, gw): - channel = gw.remote_exec(""" - f = channel.makefile(mode='r') - s = f.read(2) - channel.send(s) - s = f.read(5) - channel.send(s) - """) - channel.send("xyabcde") - s1 = channel.receive() - s2 = channel.receive() - assert s1 == "xy" - assert s2 == "abcde" - - def test_channel_file_read_empty(self, gw): - channel = gw.remote_exec("pass") - f = channel.makefile(mode="r") - s = f.read(3) - assert s == "" - s = 
f.read(5) - assert s == "" - - def test_channel_file_readline_remote(self, gw): - channel = gw.remote_exec(""" - channel.send('123\\n45') - """) - channel.waitclose(TESTTIMEOUT) - f = channel.makefile(mode="r") - s = f.readline() - assert s == "123\n" - s = f.readline() - assert s == "45" - - def test_channel_makefile_incompatmode(self, gw): - channel = gw.newchannel() - py.test.raises(ValueError, 'channel.makefile("rw")') - - def test_confusion_from_os_write_stdout(self, gw): - channel = gw.remote_exec(""" - import os - os.write(1, 'confusion!'.encode('ascii')) - channel.send(channel.receive() * 6) - channel.send(channel.receive() * 6) - """) - channel.send(3) - res = channel.receive() - assert res == 18 - channel.send(7) - res = channel.receive() - assert res == 42 - - def test_confusion_from_os_write_stderr(self, gw): - channel = gw.remote_exec(""" - import os - os.write(2, 'test'.encode('ascii')) - channel.send(channel.receive() * 6) - channel.send(channel.receive() * 6) - """) - channel.send(3) - res = channel.receive() - assert res == 18 - channel.send(7) - res = channel.receive() - assert res == 42 - - def test__rinfo(self, gw): - rinfo = gw._rinfo() - assert rinfo.executable - assert rinfo.cwd - assert rinfo.version_info - s = repr(rinfo) - old = gw.remote_exec(""" - import os.path - cwd = os.getcwd() - channel.send(os.path.basename(cwd)) - os.chdir('..') - """).receive() - try: - rinfo2 = gw._rinfo() - assert rinfo2.cwd == rinfo.cwd - rinfo3 = gw._rinfo(update=True) - assert rinfo3.cwd != rinfo2.cwd - finally: - gw._cache_rinfo = rinfo - gw.remote_exec("import os ; os.chdir(%r)" % old).waitclose() - -def test_join_blocked_execution_gateway(): - gateway = py.execnet.PopenGateway() - channel = gateway.remote_exec(""" - import time - time.sleep(5.0) - """) - def doit(): - gateway.exit() - gateway.join(joinexec=True) - return 17 - - pool = py._thread.WorkerPool() - reply = pool.dispatch(doit) - x = reply.get(timeout=1.0) - assert x == 17 - -class TestPopenGateway: - gwtype = 'popen' - - def test_chdir_separation(self, tmpdir): - old = tmpdir.chdir() - try: - gw = py.execnet.PopenGateway() - finally: - waschangedir = old.chdir() - c = gw.remote_exec("import os ; channel.send(os.getcwd())") - x = c.receive() - assert x == str(waschangedir) - - def test_many_popen(self): - num = 4 - l = [] - for i in range(num): - l.append(py.execnet.PopenGateway()) - channels = [] - for gw in l: - channel = gw.remote_exec("""channel.send(42)""") - channels.append(channel) -## try: -## while channels: -## channel = channels.pop() -## try: -## ret = channel.receive() -## assert ret == 42 -## finally: -## channel.gateway.exit() -## finally: -## for x in channels: -## x.gateway.exit() - while channels: - channel = channels.pop() - ret = channel.receive() - assert ret == 42 - - def test_rinfo_popen(self, gw): - rinfo = gw._rinfo() - assert rinfo.executable == py.std.sys.executable - assert rinfo.cwd == py.std.os.getcwd() - assert rinfo.version_info == py.std.sys.version_info - - def test_gateway_init_event(self, _pytest): - rec = _pytest.gethookrecorder(gateway.ExecnetAPI) - gw = py.execnet.PopenGateway() - call = rec.popcall("pyexecnet_gateway_init") - assert call.gateway == gw - gw.exit() - call = rec.popcall("pyexecnet_gateway_exit") - assert call.gateway == gw - - @py.test.mark.xfail # "fix needed: dying remote process does not cause waitclose() to fail" - def test_waitclose_on_remote_killed(self): - gw = py.execnet.PopenGateway() - channel = gw.remote_exec(""" - import os - import time - 
channel.send(os.getpid()) - while 1: - channel.send("#" * 100) - """) - remotepid = channel.receive() - py.process.kill(remotepid) - py.test.raises(channel.RemoteError, "channel.waitclose(TESTTIMEOUT)") - py.test.raises(EOFError, channel.send, None) - py.test.raises(EOFError, channel.receive) - - at py.test.mark.xfail -def test_endmarker_delivery_on_remote_killterm(): - if not hasattr(py.std.os, 'kill'): - py.test.skip("no os.kill()") - gw = py.execnet.PopenGateway() - try: - q = queue.Queue() - channel = gw.remote_exec(source=''' - import os - os.kill(os.getpid(), 15) - ''') - channel.setcallback(q.put, endmarker=999) - val = q.get(TESTTIMEOUT) - assert val == 999 - err = channel._getremoteerror() - finally: - gw.exit() - assert "killed" in str(err) - assert "15" in str(err) - - -def test_socket_gw_host_not_found(gw): - py.test.raises(py.execnet.HostNotFound, - 'py.execnet.SocketGateway("qowieuqowe", 9000)' - ) - -class TestSshPopenGateway: - gwtype = "ssh" - - def test_sshconfig_config_parsing(self, monkeypatch): - import subprocess - l = [] - monkeypatch.setattr(subprocess, 'Popen', - lambda *args, **kwargs: l.append(args[0])) - py.test.raises(AttributeError, - """py.execnet.SshGateway("xyz", ssh_config='qwe')""") - assert len(l) == 1 - popen_args = l[0] - i = popen_args.index('-F') - assert popen_args[i+1] == "qwe" - - def test_sshaddress(self, gw, specssh): - assert gw.remoteaddress == specssh.ssh - - def test_host_not_found(self): - py.test.raises(py.execnet.HostNotFound, - "py.execnet.SshGateway('nowhere.codespeak.net')") - -class TestThreads: - def test_threads(self): - gw = py.execnet.PopenGateway() - gw.remote_init_threads(3) - c1 = gw.remote_exec("channel.send(channel.receive())") - c2 = gw.remote_exec("channel.send(channel.receive())") - c2.send(1) - res = c2.receive() - assert res == 1 - c1.send(42) - res = c1.receive() - assert res == 42 - - def test_threads_twice(self): - gw = py.execnet.PopenGateway() - gw.remote_init_threads(3) - py.test.raises(IOError, gw.remote_init_threads, 3) - - -def test_nodebug(): - from py.__.execnet import gateway_base - assert not gateway_base.debug --- a/testing/pytest/plugin/test_pytest_execnetcleanup.py +++ /dev/null @@ -1,12 +0,0 @@ -def test_execnetplugin(testdir): - reprec = testdir.inline_runsource(""" - import py - import sys - def test_hello(): - sys._gw = py.execnet.PopenGateway() - def test_world(): - assert hasattr(sys, '_gw') - assert sys._gw not in sys._gw._cleanup._activegateways - - """, "-s", "--debug") - reprec.assertoutcome(passed=2) --- a/testing/pytest/test_pickling.py +++ b/testing/pytest/test_pickling.py @@ -182,8 +182,9 @@ class TestConfigPickling: old.chdir() def test_config__setstate__wired_correctly_in_childprocess(testdir): + execnet = py.test.importorskip("execnet") from py.__.test.dist.mypickle import PickleChannel - gw = py.execnet.PopenGateway() + gw = execnet.PopenGateway() channel = gw.remote_exec(""" import py from py.__.test.dist.mypickle import PickleChannel --- a/py/test/defaultconftest.py +++ b/py/test/defaultconftest.py @@ -10,5 +10,5 @@ Generator = py.test.collect.Generator Function = py.test.collect.Function Instance = py.test.collect.Instance -pytest_plugins = "default runner capture terminal keyword xfail tmpdir execnetcleanup monkeypatch recwarn pdb pastebin unittest helpconfig nose assertion".split() +pytest_plugins = "default runner capture terminal keyword xfail tmpdir monkeypatch recwarn pdb pastebin unittest helpconfig nose assertion".split() --- a/testing/pytest/dist/test_dsession.py +++ 
b/testing/pytest/dist/test_dsession.py @@ -1,8 +1,9 @@ from py.__.test.dist.dsession import DSession from py.__.test import outcome import py +import execnet -XSpec = py.execnet.XSpec +XSpec = execnet.XSpec def run(item, node, excinfo=None): runner = item.config.pluginmanager.getplugin("runner") --- a/setup.py +++ b/setup.py @@ -8,22 +8,20 @@ from setuptools import setup long_description = """ -advanced testing and development support library: +advanced testing and development support library: - `py.test`_: cross-project testing tool with many advanced features -- `py.execnet`_: ad-hoc code distribution to SSH, Socket and local sub processes -- `py.path`_: path abstractions over local and subversion files +- `py.path`_: path abstractions over local and subversion files - `py.code`_: dynamic code compile and traceback printing support -Compatibility: Linux, Win32, OSX, Python versions 2.3-2.6. +Compatibility: Linux, Win32, OSX, Python versions 2.4 through to 3.1. For questions please check out http://pylib.org/contact.html .. _`py.test`: http://pylib.org/test.html -.. _`py.execnet`: http://pylib.org/execnet.html .. _`py.path`: http://pylib.org/path.html .. _`py.code`: http://pylib.org/code.html -(c) Holger Krekel and others, 2009 +(c) Holger Krekel and others, 2009 """ trunk = 'trunk' @@ -63,8 +61,6 @@ def main(): 'py.cmdline', 'py.code', 'py.compat', - 'py.execnet', - 'py.execnet.script', 'py.io', 'py.log', 'py.path', --- a/testing/pytest/dist/test_mypickle.py +++ b/testing/pytest/dist/test_mypickle.py @@ -1,6 +1,7 @@ import py import sys +import execnet Queue = py.builtin._tryimport('queue', 'Queue').Queue @@ -117,7 +118,7 @@ def test_self_memoize(): TESTTIMEOUT = 2.0 class TestPickleChannelFunctional: def setup_class(cls): - cls.gw = py.execnet.PopenGateway() + cls.gw = execnet.PopenGateway() cls.gw.remote_init_threads(5) def test_popen_send_instance(self): --- a/testing/execnet/conftest.py +++ /dev/null @@ -1,46 +0,0 @@ -import py - -def pytest_generate_tests(metafunc): - if 'gw' in metafunc.funcargnames: - if hasattr(metafunc.cls, 'gwtype'): - gwtypes = [metafunc.cls.gwtype] - else: - gwtypes = ['popen', 'socket', 'ssh'] - for gwtype in gwtypes: - metafunc.addcall(id=gwtype, param=gwtype) - -def pytest_funcarg__gw(request): - scope = "session" - if request.param == "popen": - return request.cached_setup( - setup=py.execnet.PopenGateway, - teardown=lambda gw: gw.exit(), - extrakey=request.param, - scope=scope) - elif request.param == "socket": - return request.cached_setup( - setup=setup_socket_gateway, - teardown=teardown_socket_gateway, - extrakey=request.param, - scope=scope) - elif request.param == "ssh": - return request.cached_setup( - setup=lambda: setup_ssh_gateway(request), - teardown=lambda gw: gw.exit(), - extrakey=request.param, - scope=scope) - -def setup_socket_gateway(): - proxygw = py.execnet.PopenGateway() - gw = py.execnet.SocketGateway.new_remote(proxygw, ("127.0.0.1", 0)) - gw.proxygw = proxygw - return gw - -def teardown_socket_gateway(gw): - gw.exit() - gw.proxygw.exit() - -def setup_ssh_gateway(request): - sshhost = request.getfuncargvalue('specssh').ssh - gw = py.execnet.SshGateway(sshhost) - return gw --- a/testing/execnet/test_rsync.py +++ /dev/null @@ -1,148 +0,0 @@ -import py -from py.execnet import RSync - - -def pytest_funcarg__gw1(request): - return request.cached_setup( - setup=py.execnet.PopenGateway, - teardown=lambda val: val.exit(), - scope="module" - ) -pytest_funcarg__gw2 = pytest_funcarg__gw1 - -def pytest_funcarg__dirs(request): - t = 
request.getfuncargvalue('tmpdir') - class dirs: - source = t.join("source") - dest1 = t.join("dest1") - dest2 = t.join("dest2") - return dirs - -class TestRSync: - def test_notargets(self, dirs): - rsync = RSync(dirs.source) - py.test.raises(IOError, "rsync.send()") - assert rsync.send(raises=False) is None - - def test_dirsync(self, dirs, gw1, gw2): - dest = dirs.dest1 - dest2 = dirs.dest2 - source = dirs.source - - for s in ('content1', 'content2', 'content2-a-bit-longer'): - source.ensure('subdir', 'file1').write(s) - rsync = RSync(dirs.source) - rsync.add_target(gw1, dest) - rsync.add_target(gw2, dest2) - rsync.send() - assert dest.join('subdir').check(dir=1) - assert dest.join('subdir', 'file1').check(file=1) - assert dest.join('subdir', 'file1').read() == s - assert dest2.join('subdir').check(dir=1) - assert dest2.join('subdir', 'file1').check(file=1) - assert dest2.join('subdir', 'file1').read() == s - for x in dest, dest2: - fn = x.join("subdir", "file1") - fn.setmtime(0) - - source.join('subdir').remove('file1') - rsync = RSync(source) - rsync.add_target(gw2, dest2) - rsync.add_target(gw1, dest) - rsync.send() - assert dest.join('subdir', 'file1').check(file=1) - assert dest2.join('subdir', 'file1').check(file=1) - rsync = RSync(source) - rsync.add_target(gw1, dest, delete=True) - rsync.add_target(gw2, dest2) - rsync.send() - assert not dest.join('subdir', 'file1').check() - assert dest2.join('subdir', 'file1').check() - - def test_dirsync_twice(self, dirs, gw1, gw2): - source = dirs.source - source.ensure("hello") - rsync = RSync(source) - rsync.add_target(gw1, dirs.dest1) - rsync.send() - assert dirs.dest1.join('hello').check() - py.test.raises(IOError, "rsync.send()") - assert rsync.send(raises=False) is None - rsync.add_target(gw1, dirs.dest2) - rsync.send() - assert dirs.dest2.join('hello').check() - py.test.raises(IOError, "rsync.send()") - assert rsync.send(raises=False) is None - - def test_rsync_default_reporting(self, capsys, dirs, gw1): - source = dirs.source - source.ensure("hello") - rsync = RSync(source) - rsync.add_target(gw1, dirs.dest1) - rsync.send() - out, err = capsys.readouterr() - assert out.find("hello") != -1 - - def test_rsync_non_verbose(self, capsys, dirs, gw1): - source = dirs.source - source.ensure("hello") - rsync = RSync(source, verbose=False) - rsync.add_target(gw1, dirs.dest1) - rsync.send() - out, err = capsys.readouterr() - assert not out - assert not err - - def test_symlink_rsync(self, dirs, gw1): - if py.std.sys.platform == 'win32': - py.test.skip("symlinks are unsupported on Windows.") - source = dirs.source - dest = dirs.dest1 - dirs.source.ensure("existant") - source.join("rellink").mksymlinkto(source.join("existant"), absolute=0) - source.join('abslink').mksymlinkto(source.join("existant")) - - rsync = RSync(source) - rsync.add_target(gw1, dest) - rsync.send() - - assert dest.join('rellink').readlink() == dest.join("existant") - assert dest.join('abslink').readlink() == dest.join("existant") - - def test_callback(self, dirs, gw1): - dest = dirs.dest1 - source = dirs.source - source.ensure("existant").write("a" * 100) - source.ensure("existant2").write("a" * 10) - total = {} - def callback(cmd, lgt, channel): - total[(cmd, lgt)] = True - - rsync = RSync(source, callback=callback) - #rsync = RSync() - rsync.add_target(gw1, dest) - rsync.send() - - assert total == {("list", 110):True, ("ack", 100):True, ("ack", 10):True} - - def test_file_disappearing(self, dirs, gw1): - dest = dirs.dest1 - source = dirs.source - source.ensure("ex").write("a" 
* 100) - source.ensure("ex2").write("a" * 100) - - class DRsync(RSync): - def filter(self, x): - assert x != source - if x.endswith("ex2"): - self.x = 1 - source.join("ex2").remove() - return True - - rsync = DRsync(source) - rsync.add_target(gw1, dest) - rsync.send() - assert rsync.x == 1 - assert len(dest.listdir()) == 1 - assert len(source.listdir()) == 1 - --- a/testing/execnet/test_basics.py +++ /dev/null @@ -1,198 +0,0 @@ - -import py -import sys, os, subprocess, inspect -from py.__.execnet import gateway_base, gateway -from py.__.execnet.gateway_base import Message, Channel, ChannelFactory - -def test_subprocess_interaction(anypython): - line = gateway.popen_bootstrapline - compile(line, 'xyz', 'exec') - args = [str(anypython), '-c', line] - popen = subprocess.Popen(args, bufsize=0, stderr=subprocess.STDOUT, - stdin=subprocess.PIPE, stdout=subprocess.PIPE) - def send(line): - popen.stdin.write(line.encode('ascii')) - if sys.version_info > (3,0): # 3k still buffers - popen.stdin.flush() - def receive(): - return popen.stdout.readline().decode('ascii') - - try: - source = py.code.Source(read_write_loop, "read_write_loop()") - repr_source = repr(str(source)) + "\n" - sendline = repr_source - send(sendline) - s = receive() - assert s == "ok\n" - send("hello\n") - s = receive() - assert s == "received: hello\n" - send("world\n") - s = receive() - assert s == "received: world\n" - finally: - popen.stdin.close() - popen.stdout.close() - popen.wait() - -def read_write_loop(): - import os, sys - sys.stdout.write("ok\n") - sys.stdout.flush() - while 1: - try: - line = sys.stdin.readline() - sys.stdout.write("received: %s" % line) - sys.stdout.flush() - except (IOError, EOFError): - break - -def pytest_generate_tests(metafunc): - if 'anypython' in metafunc.funcargnames: - for name in 'python3.1', 'python2.4', 'python2.5', 'python2.6': - metafunc.addcall(id=name, param=name) - -def pytest_funcarg__anypython(request): - name = request.param - executable = py.path.local.sysfind(name) - if executable is None: - py.test.skip("no %s found" % (name,)) - return executable - -def test_io_message(anypython, tmpdir): - check = tmpdir.join("check.py") - check.write(py.code.Source(gateway_base, """ - try: - from io import BytesIO - except ImportError: - from StringIO import StringIO as BytesIO - import tempfile - temp_out = BytesIO() - temp_in = BytesIO() - io = Popen2IO(temp_out, temp_in) - for i, msg_cls in Message._types.items(): - print ("checking %s %s" %(i, msg_cls)) - for data in "hello", "hello".encode('ascii'): - msg1 = msg_cls(i, data) - msg1.writeto(io) - x = io.outfile.getvalue() - io.outfile.truncate(0) - io.outfile.seek(0) - io.infile.seek(0) - io.infile.write(x) - io.infile.seek(0) - msg2 = Message.readfrom(io) - assert msg1.channelid == msg2.channelid, (msg1, msg2) - assert msg1.data == msg2.data - print ("all passed") - """)) - #out = py.process.cmdexec("%s %s" %(executable,check)) - out = anypython.sysexec(check) - print (out) - assert "all passed" in out - -def test_popen_io(anypython, tmpdir): - check = tmpdir.join("check.py") - check.write(py.code.Source(gateway_base, """ - do_exec(Popen2IO.server_stmt, globals()) - io.write("hello".encode('ascii')) - s = io.read(1) - assert s == "x".encode('ascii') - """)) - from subprocess import Popen, PIPE - args = [str(anypython), str(check)] - proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE) - proc.stdin.write("x".encode('ascii')) - stdout, stderr = proc.communicate() - print (stderr) - ret = proc.wait() - assert "hello".encode('ascii') 
in stdout - - -def test_rinfo_source(anypython, tmpdir): - check = tmpdir.join("check.py") - check.write(py.code.Source(""" - class Channel: - def send(self, data): - assert eval(repr(data), {}) == data - channel = Channel() - """, gateway.rinfo_source, """ - print ('all passed') - """)) - out = anypython.sysexec(check) - print (out) - assert "all passed" in out - -def test_geterrortext(anypython, tmpdir): - check = tmpdir.join("check.py") - check.write(py.code.Source(gateway_base, """ - class Arg: - pass - errortext = geterrortext((Arg, "1", 4)) - assert "Arg" in errortext - import sys - try: - raise ValueError("17") - except ValueError: - excinfo = sys.exc_info() - s = geterrortext(excinfo) - assert "17" in s - print ("all passed") - """)) - out = anypython.sysexec(check) - print (out) - assert "all passed" in out - -def test_stdouterrin_setnull(): - cap = py.io.StdCaptureFD() - from py.__.execnet.gateway import stdouterrin_setnull - stdouterrin_setnull() - import os - os.write(1, "hello".encode('ascii')) - if os.name == "nt": - os.write(2, "world") - os.read(0, 1) - out, err = cap.reset() - assert not out - assert not err - - -class TestMessage: - def test_wire_protocol(self): - for cls in Message._types.values(): - one = py.io.BytesIO() - data = '23'.encode('ascii') - cls(42, data).writeto(one) - two = py.io.BytesIO(one.getvalue()) - msg = Message.readfrom(two) - assert isinstance(msg, cls) - assert msg.channelid == 42 - assert msg.data == data - assert isinstance(repr(msg), str) - # == "" %(msg.__class__.__name__, ) - -class TestPureChannel: - def setup_method(self, method): - self.fac = ChannelFactory(None) - - def test_factory_create(self): - chan1 = self.fac.new() - assert chan1.id == 1 - chan2 = self.fac.new() - assert chan2.id == 3 - - def test_factory_getitem(self): - chan1 = self.fac.new() - assert self.fac._channels[chan1.id] == chan1 - chan2 = self.fac.new() - assert self.fac._channels[chan2.id] == chan2 - - def test_channel_timeouterror(self): - channel = self.fac.new() - py.test.raises(IOError, channel.waitclose, timeout=0.01) - - def test_channel_makefile_incompatmode(self): - channel = self.fac.new() - py.test.raises(ValueError, 'channel.makefile("rw")') - - --- a/py/execnet/script/socketserver.py +++ /dev/null @@ -1,102 +0,0 @@ -#! 
/usr/bin/env python - -""" - start socket based minimal readline exec server -""" -# this part of the program only executes on the server side -# - -progname = 'socket_readline_exec_server-1.2' - -import sys, socket, os -try: - import fcntl -except ImportError: - fcntl = None - -debug = 0 - -if debug: # and not os.isatty(sys.stdin.fileno()): - f = open('/tmp/execnet-socket-pyout.log', 'w') - old = sys.stdout, sys.stderr - sys.stdout = sys.stderr = f - #import py - #compile = py.code.compile - -def print_(*args): - print(" ".join(str(arg) for arg in args)) - -if sys.version_info > (3, 0): - exec("""def exec_(source, locs): - exec(source, locs)""") -else: - exec("""def exec_(source, locs): - exec source in locs""") - -def exec_from_one_connection(serversock): - print_(progname, 'Entering Accept loop', serversock.getsockname()) - clientsock,address = serversock.accept() - print_(progname, 'got new connection from %s %s' % address) - clientfile = clientsock.makefile('rb') - print_("reading line") - # rstrip so that we can use \r\n for telnet testing - source = clientfile.readline().rstrip() - clientfile.close() - g = {'clientsock' : clientsock, 'address' : address} - source = eval(source) - if source: - co = compile(source+'\n', source, 'exec') - print_(progname, 'compiled source, executing') - try: - exec_(co, g) - finally: - print_(progname, 'finished executing code') - # background thread might hold a reference to this (!?) - #clientsock.close() - -def bind_and_listen(hostport): - if isinstance(hostport, str): - host, port = hostport.split(':') - hostport = (host, int(port)) - serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - # set close-on-exec - if hasattr(fcntl, 'FD_CLOEXEC'): - old = fcntl.fcntl(serversock.fileno(), fcntl.F_GETFD) - fcntl.fcntl(serversock.fileno(), fcntl.F_SETFD, old | fcntl.FD_CLOEXEC) - # allow the address to be re-used in a reasonable amount of time - if os.name == 'posix' and sys.platform != 'cygwin': - serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - - serversock.bind(hostport) - serversock.listen(5) - return serversock - -def startserver(serversock, loop=False): - try: - while 1: - try: - exec_from_one_connection(serversock) - except (KeyboardInterrupt, SystemExit): - raise - except: - if debug: - import traceback - traceback.print_exc() - else: - excinfo = sys.exc_info() - print_("got exception", excinfo[1]) - if not loop: - break - finally: - print_("leaving socketserver execloop") - serversock.shutdown(2) - -if __name__ == '__main__': - import sys - if len(sys.argv)>1: - hostport = sys.argv[1] - else: - hostport = ':8888' - serversock = bind_and_listen(hostport) - startserver(serversock, loop=False) - --- a/bin-for-dist/test_install.py +++ b/bin-for-dist/test_install.py @@ -78,7 +78,7 @@ class VirtualEnv(object): def makegateway(self): python = self._cmd('python') - return py.execnet.makegateway("popen//python=%s" %(python,)) + return execnet.makegateway("popen//python=%s" %(python,)) def pcall(self, cmd, *args, **kw): self.ensure() --- a/py/test/looponfail/remote.py +++ b/py/test/looponfail/remote.py @@ -7,10 +7,9 @@ otherwise changes to source code can crash the controlling process which should never happen. 
""" - -from __future__ import generators import py import sys +import execnet from py.__.test.session import Session from py.__.test.dist.mypickle import PickleChannel from py.__.test.looponfail import util @@ -55,7 +54,7 @@ class RemoteControl(object): py.builtin.print_("RemoteControl:", msg) def initgateway(self): - return py.execnet.PopenGateway() + return execnet.PopenGateway() def setup(self, out=None): if out is None: --- a/testing/pytest/dist/test_txnode.py +++ b/testing/pytest/dist/test_txnode.py @@ -1,5 +1,6 @@ import py +import execnet from py.__.test.dist.txnode import TXNode queue = py.builtin._tryimport("queue", "Queue") Queue = queue.Queue @@ -46,8 +47,8 @@ class MySetup: config = py.test.config._reparse([]) self.config = config self.queue = Queue() - self.xspec = py.execnet.XSpec("popen") - self.gateway = py.execnet.makegateway(self.xspec) + self.xspec = execnet.XSpec("popen") + self.gateway = execnet.makegateway(self.xspec) self.id += 1 self.gateway.id = str(self.id) self.node = TXNode(self.gateway, self.config, putevent=self.queue.put) --- a/example/execnet/popen_read_multiple.py +++ b/example/execnet/popen_read_multiple.py @@ -9,7 +9,7 @@ NUM_PROCESSES = 5 channels = [] for i in range(NUM_PROCESSES): - gw = py.execnet.PopenGateway() # or use SSH or socket gateways + gw = execnet.PopenGateway() # or use SSH or socket gateways channel = gw.remote_exec(""" import time secs = channel.receive() @@ -19,7 +19,7 @@ for i in range(NUM_PROCESSES): channels.append(channel) print "*** instantiated subprocess", gw -mc = py.execnet.MultiChannel(channels) +mc = execnet.MultiChannel(channels) queue = mc.make_receive_queue() print "***", "verifying that timeout on receiving results from blocked subprocesses works" --- a/py/path/gateway/channeltest.py +++ b/py/path/gateway/channeltest.py @@ -52,7 +52,7 @@ class PathServer: if __name__ == '__main__': import py - gw = py.execnet.PopenGateway() + gw = execnet.PopenGateway() channel = gw._channelfactory.new() srv = PathServer(channel) c = gw.remote_exec(""" --- a/doc/changelog.txt +++ b/doc/changelog.txt @@ -1,6 +1,8 @@ Changes between 1.0.x and 'trunk' ===================================== +* remove py.execnet code and substitute all usages with 'execnet' proper + * fix issue50 - cached_setup now caches more to expectations for test functions with multiple arguments. --- a/py/execnet/script/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# --- a/py/execnet/script/xx.py +++ /dev/null @@ -1,9 +0,0 @@ -import rlcompleter2 -rlcompleter2.setup() - -import register, sys -try: - hostport = sys.argv[1] -except: - hostport = ':8888' -gw = register.ServerGateway(hostport) --- a/testing/execnet/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# --- a/py/execnet/serializer.py +++ /dev/null @@ -1,272 +0,0 @@ -""" -Simple marshal format (based on pickle) designed to work across Python versions. 
-""" - -import sys -import struct - -_INPY3 = _REALLY_PY3 = sys.version_info > (3, 0) - -class SerializeError(Exception): - pass - -class SerializationError(SerializeError): - """Error while serializing an object.""" - -class UnserializableType(SerializationError): - """Can't serialize a type.""" - -class UnserializationError(SerializeError): - """Error while unserializing an object.""" - -class VersionMismatch(UnserializationError): - """Data from a previous or later format.""" - -class Corruption(UnserializationError): - """The pickle format appears to have been corrupted.""" - -if _INPY3: - def b(s): - return s.encode("ascii") -else: - b = str - -FOUR_BYTE_INT_MAX = 2147483647 - -_int4_format = struct.Struct("!i") -_float_format = struct.Struct("!d") - -# Protocol constants -VERSION_NUMBER = 1 -VERSION = b(chr(VERSION_NUMBER)) -PY2STRING = b('s') -PY3STRING = b('t') -UNICODE = b('u') -BYTES = b('b') -NEWLIST = b('l') -BUILDTUPLE = b('T') -SETITEM = b('m') -NEWDICT = b('d') -INT = b('i') -FLOAT = b('f') -STOP = b('S') - -class CrossVersionOptions(object): - pass - -class Serializer(object): - - def __init__(self, stream): - self.stream = stream - - def save(self, obj): - self.stream.write(VERSION) - self._save(obj) - self.stream.write(STOP) - - def _save(self, obj): - tp = type(obj) - try: - dispatch = self.dispatch[tp] - except KeyError: - raise UnserializableType("can't serialize %s" % (tp,)) - dispatch(self, obj) - - dispatch = {} - - def save_bytes(self, bytes_): - self.stream.write(BYTES) - self._write_byte_sequence(bytes_) - dispatch[bytes] = save_bytes - - if _INPY3: - def save_string(self, s): - self.stream.write(PY3STRING) - self._write_unicode_string(s) - else: - def save_string(self, s): - self.stream.write(PY2STRING) - self._write_byte_sequence(s) - - def save_unicode(self, s): - self.stream.write(UNICODE) - self._write_unicode_string(s) - dispatch[unicode] = save_unicode - dispatch[str] = save_string - - def _write_unicode_string(self, s): - try: - as_bytes = s.encode("utf-8") - except UnicodeEncodeError: - raise SerializationError("strings must be utf-8 encodable") - self._write_byte_sequence(as_bytes) - - def _write_byte_sequence(self, bytes_): - self._write_int4(len(bytes_), "string is too long") - self.stream.write(bytes_) - - def save_int(self, i): - self.stream.write(INT) - self._write_int4(i) - dispatch[int] = save_int - - def save_float(self, flt): - self.stream.write(FLOAT) - self.stream.write(_float_format.pack(flt)) - dispatch[float] = save_float - - def _write_int4(self, i, error="int must be less than %i" % - (FOUR_BYTE_INT_MAX,)): - if i > FOUR_BYTE_INT_MAX: - raise SerializationError(error) - self.stream.write(_int4_format.pack(i)) - - def save_list(self, L): - self.stream.write(NEWLIST) - self._write_int4(len(L), "list is too long") - for i, item in enumerate(L): - self._write_setitem(i, item) - dispatch[list] = save_list - - def _write_setitem(self, key, value): - self._save(key) - self._save(value) - self.stream.write(SETITEM) - - def save_dict(self, d): - self.stream.write(NEWDICT) - for key, value in d.items(): - self._write_setitem(key, value) - dispatch[dict] = save_dict - - def save_tuple(self, tup): - for item in tup: - self._save(item) - self.stream.write(BUILDTUPLE) - self._write_int4(len(tup), "tuple is too long") - dispatch[tuple] = save_tuple - - -class _UnserializationOptions(object): - pass - -class _Py2UnserializationOptions(_UnserializationOptions): - - def __init__(self, py3_strings_as_str=False): - self.py3_strings_as_str = 
py3_strings_as_str - -class _Py3UnserializationOptions(_UnserializationOptions): - - def __init__(self, py2_strings_as_str=False): - self.py2_strings_as_str = py2_strings_as_str - -if _INPY3: - UnserializationOptions = _Py3UnserializationOptions -else: - UnserializationOptions = _Py2UnserializationOptions - -class _Stop(Exception): - pass - -class Unserializer(object): - - def __init__(self, stream, options=UnserializationOptions()): - self.stream = stream - self.options = options - - def load(self): - self.stack = [] - version = ord(self.stream.read(1)) - if version != VERSION_NUMBER: - raise VersionMismatch("%i != %i" % (version, VERSION_NUMBER)) - try: - while True: - opcode = self.stream.read(1) - if not opcode: - raise EOFError - try: - loader = self.opcodes[opcode] - except KeyError: - raise Corruption("unkown opcode %s" % (opcode,)) - loader(self) - except _Stop: - if len(self.stack) != 1: - raise UnserializationError("internal unserialization error") - return self.stack[0] - else: - raise Corruption("didn't get STOP") - - opcodes = {} - - def load_int(self): - i = self._read_int4() - self.stack.append(i) - opcodes[INT] = load_int - - def load_float(self): - binary = self.stream.read(_float_format.size) - self.stack.append(_float_format.unpack(binary)[0]) - opcodes[FLOAT] = load_float - - def _read_int4(self): - return _int4_format.unpack(self.stream.read(4))[0] - - def _read_byte_string(self): - length = self._read_int4() - as_bytes = self.stream.read(length) - return as_bytes - - def load_py3string(self): - as_bytes = self._read_byte_string() - if not _INPY3 and self.options.py3_strings_as_str: - # XXX Should we try to decode into latin-1? - self.stack.append(as_bytes) - else: - self.stack.append(as_bytes.decode("utf-8")) - opcodes[PY3STRING] = load_py3string - - def load_py2string(self): - as_bytes = self._read_byte_string() - if _INPY3 and self.options.py2_strings_as_str: - s = as_bytes.decode("latin-1") - else: - s = as_bytes - self.stack.append(s) - opcodes[PY2STRING] = load_py2string - - def load_bytes(self): - s = self._read_byte_string() - self.stack.append(s) - opcodes[BYTES] = load_bytes - - def load_unicode(self): - self.stack.append(self._read_byte_string().decode("utf-8")) - opcodes[UNICODE] = load_unicode - - def load_newlist(self): - length = self._read_int4() - self.stack.append([None] * length) - opcodes[NEWLIST] = load_newlist - - def load_setitem(self): - if len(self.stack) < 3: - raise Corruption("not enough items for setitem") - value = self.stack.pop() - key = self.stack.pop() - self.stack[-1][key] = value - opcodes[SETITEM] = load_setitem - - def load_newdict(self): - self.stack.append({}) - opcodes[NEWDICT] = load_newdict - - def load_buildtuple(self): - length = self._read_int4() - tup = tuple(self.stack[-length:]) - del self.stack[-length:] - self.stack.append(tup) - opcodes[BUILDTUPLE] = load_buildtuple - - def load_stop(self): - raise _Stop - opcodes[STOP] = load_stop --- a/conftest.py +++ b/conftest.py @@ -17,15 +17,18 @@ def pytest_addoption(parser): def pytest_funcarg__specssh(request): return getspecssh(request.config) -def pytest_funcarg__specsocket(request): - return getsocketspec(request.config) +def getgspecs(config=None): + if config is None: + config = py.test.config + return [execnet.XSpec(spec) + for spec in config.getvalueorskip("gspecs")] # configuration information for tests def getgspecs(config=None): if config is None: config = py.test.config - return [py.execnet.XSpec(spec) + return [execnet.XSpec(spec) for spec in 
config.getvalueorskip("gspecs")] def getspecssh(config=None): --- a/py/execnet/script/shell.py +++ /dev/null @@ -1,85 +0,0 @@ -#! /usr/bin/env python -""" -a remote python shell - -for injection into startserver.py -""" -import sys, os, socket, select - -try: - clientsock -except NameError: - print("client side starting") - import sys - host, port = sys.argv[1].split(':') - port = int(port) - myself = open(os.path.abspath(sys.argv[0]), 'rU').read() - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.connect((host, port)) - sock.sendall(repr(myself)+'\n') - print("send boot string") - inputlist = [ sock, sys.stdin ] - try: - while 1: - r,w,e = select.select(inputlist, [], []) - if sys.stdin in r: - line = raw_input() - sock.sendall(line + '\n') - if sock in r: - line = sock.recv(4096) - sys.stdout.write(line) - sys.stdout.flush() - except: - import traceback - print(traceback.print_exc()) - - sys.exit(1) - -print("server side starting") -# server side -# -from traceback import print_exc -from threading import Thread - -class promptagent(Thread): - def __init__(self, clientsock): - Thread.__init__(self) - self.clientsock = clientsock - - def run(self): - print("Entering thread prompt loop") - clientfile = self.clientsock.makefile('w') - - filein = self.clientsock.makefile('r') - loc = self.clientsock.getsockname() - - while 1: - try: - clientfile.write('%s %s >>> ' % loc) - clientfile.flush() - line = filein.readline() - if len(line)==0: raise EOFError("nothing") - #print >>sys.stderr,"got line: " + line - if line.strip(): - oldout, olderr = sys.stdout, sys.stderr - sys.stdout, sys.stderr = clientfile, clientfile - try: - try: - exec(compile(line + '\n','', 'single')) - except: - print_exc() - finally: - sys.stdout=oldout - sys.stderr=olderr - clientfile.flush() - except EOFError: - e = sys.exc_info()[1] - sys.stderr.write("connection close, prompt thread returns") - break - #print >>sys.stdout, "".join(apply(format_exception,sys.exc_info())) - - self.clientsock.close() - -prompter = promptagent(clientsock) -prompter.start() -print("promptagent - thread started") --- a/py/__init__.py +++ b/py/__init__.py @@ -1,22 +1,20 @@ # -*- coding: utf-8 -*- """ -advanced testing and development support library: +advanced testing and development support library: - `py.test`_: cross-project testing tool with many advanced features -- `py.execnet`_: ad-hoc code distribution to SSH, Socket and local sub processes -- `py.path`_: path abstractions over local and subversion files +- `py.path`_: path abstractions over local and subversion files - `py.code`_: dynamic code compile and traceback printing support -Compatibility: Linux, Win32, OSX, Python versions 2.3-2.6. +Compatibility: Linux, Win32, OSX, Python versions 2.4 through to 3.1. For questions please check out http://pylib.org/contact.html .. _`py.test`: http://pylib.org/test.html -.. _`py.execnet`: http://pylib.org/execnet.html .. _`py.path`: http://pylib.org/path.html .. 
_`py.code`: http://pylib.org/code.html -(c) Holger Krekel and others, 2009 +(c) Holger Krekel and others, 2009 """ from py.initpkg import initpkg trunk = "trunk" @@ -159,21 +157,6 @@ initpkg(__name__, 'builtin.execfile' : ('./builtin/builtin31.py', 'execfile'), 'builtin.callable' : ('./builtin/builtin31.py', 'callable'), - # gateways into remote contexts - 'execnet.__doc__' : ('./execnet/__init__.py', '__doc__'), - 'execnet._HookSpecs' : ('./execnet/gateway_base.py', 'ExecnetAPI'), - 'execnet.SocketGateway' : ('./execnet/gateway.py', 'SocketGateway'), - 'execnet.PopenGateway' : ('./execnet/gateway.py', 'PopenGateway'), - 'execnet.SshGateway' : ('./execnet/gateway.py', 'SshGateway'), - 'execnet.HostNotFound' : ('./execnet/gateway.py', 'HostNotFound'), - 'execnet.XSpec' : ('./execnet/xspec.py', 'XSpec'), - 'execnet.makegateway' : ('./execnet/xspec.py', 'makegateway'), - 'execnet.MultiGateway' : ('./execnet/multi.py', 'MultiGateway'), - 'execnet.MultiChannel' : ('./execnet/multi.py', 'MultiChannel'), - - # execnet scripts - 'execnet.RSync' : ('./execnet/rsync.py', 'RSync'), - # input-output helping 'io.__doc__' : ('./io/__init__.py', '__doc__'), 'io.dupfile' : ('./io/capture.py', 'dupfile'), --- a/py/test/dist/gwmanage.py +++ b/py/test/dist/gwmanage.py @@ -4,7 +4,8 @@ import py import sys, os -from py.__.execnet.gateway_base import RemoteError +import execnet +from execnet.gateway_base import RemoteError class GatewayManager: RemoteError = RemoteError @@ -13,8 +14,8 @@ class GatewayManager: self.specs = [] self.hook = hook for spec in specs: - if not isinstance(spec, py.execnet.XSpec): - spec = py.execnet.XSpec(spec) + if not isinstance(spec, execnet.XSpec): + spec = execnet.XSpec(spec) if not spec.chdir and not spec.popen: spec.chdir = defaultchdir self.specs.append(spec) @@ -22,7 +23,7 @@ class GatewayManager: def makegateways(self): assert not self.gateways for spec in self.specs: - gw = py.execnet.makegateway(spec) + gw = execnet.makegateway(spec) self.gateways.append(gw) gw.id = "[%s]" % len(self.gateways) self.hook.pytest_gwmanage_newgateway( @@ -39,7 +40,7 @@ class GatewayManager: else: if remote: l.append(gw) - return py.execnet.MultiGateway(gateways=l) + return execnet.MultiGateway(gateways=l) def multi_exec(self, source, inplacelocal=True): """ remote execute code on all gateways. @@ -87,7 +88,7 @@ class GatewayManager: gw = self.gateways.pop() gw.exit() -class HostRSync(py.execnet.RSync): +class HostRSync(execnet.RSync): """ RSyncer that filters out common files """ def __init__(self, sourcedir, *args, **kwargs): --- a/doc/test/customize.txt +++ b/doc/test/customize.txt @@ -364,7 +364,7 @@ remote environment. For this you can im def pytest_gwmanage_newgateway(gateway, platinfo): """ called after a gateway is instantiated. """ -The ``gateway`` object here has a ``spec`` attribute which is an ``py.execnet.XSpec`` +The ``gateway`` object here has a ``spec`` attribute which is an ``execnet.XSpec`` object, which has attributes that map key/values as specified from a ``--txspec`` option. The platinfo object is a dictionary with information about the remote process: --- a/py/execnet/gateway_base.py +++ /dev/null @@ -1,757 +0,0 @@ -""" -base execnet gateway code, a quick overview. - -the code of this module is sent to the "other side" -as a means of bootstrapping a Gateway object -capable of receiving and executing code, -and routing data through channels. - -Gateways operate on InputOutput objects offering -a write and a read(n) method. 
- -Once bootstrapped a higher level protocol -based on Messages is used. Messages are serialized -to and from InputOutput objects. The details of this protocol -are locally defined in this module. There is no need -for standardizing or versioning the protocol. - -After bootstrapping the BaseGateway opens a receiver thread which -accepts encoded messages and triggers actions to interpret them. -Sending of channel data items happens directly through -write operations to InputOutput objects so there is no -separate thread. - -Code execution messages are put into an execqueue from -which they will be taken for execution. gateway.serve() -will take and execute such items, one by one. This means -that by incoming default execution is single-threaded. - -The receiver thread terminates if the remote side sends -a gateway termination message or if the IO-connection drops. -It puts an end symbol into the execqueue so -that serve() can cleanly finish as well. - -(C) 2004-2009 Holger Krekel, Armin Rigo and others -""" -import sys, os, weakref -import threading, traceback, socket, struct -try: - import queue -except ImportError: - import Queue as queue - -if sys.version_info > (3, 0): - exec("""def do_exec(co, loc): - exec(co, loc)""") - unicode = str -else: - exec("""def do_exec(co, loc): - exec co in loc""") - bytes = str - - -def str(*args): - raise EnvironmentError( - "use unicode or bytes, not cross-python ambigous 'str'") - -default_encoding = "UTF-8" -sysex = (KeyboardInterrupt, SystemExit) - -debug = 0 # open('/tmp/execnet-debug-%d' % os.getpid() , 'w') - - -# ___________________________________________________________________________ -# -# input output classes -# ___________________________________________________________________________ - -class SocketIO: - server_stmt = "io = SocketIO(clientsock)" - - error = (socket.error, EOFError) - def __init__(self, sock): - self.sock = sock - try: - sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) - sock.setsockopt(socket.SOL_IP, socket.IP_TOS, 0x10)# IPTOS_LOWDELAY - except socket.error: - e = sys.exc_info()[1] - sys.stderr.write("WARNING: cannot set socketoption") - self.readable = self.writeable = True - - def read(self, numbytes): - "Read exactly 'bytes' bytes from the socket." - buf = bytes() - while len(buf) < numbytes: - t = self.sock.recv(numbytes - len(buf)) - if not t: - raise EOFError - buf += t - return buf - - def write(self, data): - assert isinstance(data, bytes) - self.sock.sendall(data) - - def close_read(self): - if self.readable: - try: - self.sock.shutdown(0) - except socket.error: - pass - self.readable = None - def close_write(self): - if self.writeable: - try: - self.sock.shutdown(1) - except socket.error: - pass - self.writeable = None - -class Popen2IO: - server_stmt = """ -import os, sys, tempfile -io = Popen2IO(sys.stdout, sys.stdin) -sys.stdout = tempfile.TemporaryFile('w') -sys.stdin = tempfile.TemporaryFile('r') -""" - error = (IOError, OSError, EOFError) - - def __init__(self, outfile, infile): - # we need raw byte streams - self.outfile, self.infile = outfile, infile - if sys.platform == "win32": - import msvcrt - msvcrt.setmode(infile.fileno(), os.O_BINARY) - msvcrt.setmode(outfile.fileno(), os.O_BINARY) - self.readable = self.writeable = True - - def read(self, numbytes): - """Read exactly 'numbytes' bytes from the pipe. 
""" - try: - data = self.infile.buffer.read(numbytes) - except AttributeError: - data = self.infile.read(numbytes) - if len(data) < numbytes: - raise EOFError - return data - - def write(self, data): - """write out all data bytes. """ - assert isinstance(data, bytes) - try: - self.outfile.buffer.write(data) - except AttributeError: - self.outfile.write(data) - self.outfile.flush() - - def close_read(self): - if self.readable: - self.infile.close() - self.readable = None - - def close_write(self): - try: - self.outfile.close() - except EnvironmentError: - pass - self.writeable = None - -# ___________________________________________________________________________ -# -# Messages -# ___________________________________________________________________________ -# the header format -HDR_FORMAT = "!hhii" -HDR_SIZE = struct.calcsize(HDR_FORMAT) - -is3k = sys.version_info >= (3,0) - -class Message: - """ encapsulates Messages and their wire protocol. """ - _types = {} - def __init__(self, channelid=0, data=''): - self.channelid = channelid - self.data = data - - def writeto(self, io): - # XXX marshal.dumps doesn't work for exchanging data across Python - # version :-((( XXX check this statement wrt python2.4 through 3.1 - data = self.data - if isinstance(data, bytes): - dataformat = 1 + int(is3k) - else: - if isinstance(data, unicode): - dataformat = 3 - else: - data = repr(self.data) # argh - dataformat = 4 - data = data.encode(default_encoding) - header = struct.pack(HDR_FORMAT, self.msgtype, dataformat, - self.channelid, len(data)) - io.write(header + data) - - def readfrom(cls, io): - header = io.read(HDR_SIZE) - (msgtype, dataformat, - senderid, stringlen) = struct.unpack(HDR_FORMAT, header) - data = io.read(stringlen) - if dataformat == 1: - if is3k: - # remote was python2-str, we are 3k-text - data = data.decode(default_encoding) - elif dataformat == 2: - # remote was python3-bytes - pass - else: - data = data.decode(default_encoding) - if dataformat == 3: - pass - elif dataformat == 4: - data = eval(data, {}) # reversed argh - else: - raise ValueError("bad data format") - return cls._types[msgtype](senderid, data) - readfrom = classmethod(readfrom) - - def __repr__(self): - r = repr(self.data) - if len(r) > 50: - return "" %(self.__class__.__name__, - self.channelid, len(r)) - else: - return "" %(self.__class__.__name__, - self.channelid, self.data) - -def _setupmessages(): - class CHANNEL_OPEN(Message): - def received(self, gateway): - channel = gateway._channelfactory.new(self.channelid) - gateway._local_schedulexec(channel=channel, sourcetask=self.data) - - class CHANNEL_NEW(Message): - def received(self, gateway): - """ receive a remotely created new (sub)channel. 
""" - newid = self.data - newchannel = gateway._channelfactory.new(newid) - gateway._channelfactory._local_receive(self.channelid, newchannel) - - class CHANNEL_DATA(Message): - def received(self, gateway): - gateway._channelfactory._local_receive(self.channelid, self.data) - - class CHANNEL_CLOSE(Message): - def received(self, gateway): - gateway._channelfactory._local_close(self.channelid) - - class CHANNEL_CLOSE_ERROR(Message): - def received(self, gateway): - remote_error = gateway._channelfactory.RemoteError(self.data) - gateway._channelfactory._local_close(self.channelid, remote_error) - - class CHANNEL_LAST_MESSAGE(Message): - def received(self, gateway): - gateway._channelfactory._local_close(self.channelid, sendonly=True) - - classes = [CHANNEL_OPEN, CHANNEL_NEW, CHANNEL_DATA, - CHANNEL_CLOSE, CHANNEL_CLOSE_ERROR, CHANNEL_LAST_MESSAGE] - - for i, cls in enumerate(classes): - Message._types[i] = cls - cls.msgtype = i - setattr(Message, cls.__name__, cls) - -_setupmessages() - -def geterrortext(excinfo): - try: - l = traceback.format_exception(*excinfo) - errortext = "".join(l) - except sysex: - raise - except: - errortext = '%s: %s' % (excinfo[0].__name__, - excinfo[1]) - return errortext - -class RemoteError(EOFError): - """ Contains an Exceptions from the other side. """ - def __init__(self, formatted): - self.formatted = formatted - EOFError.__init__(self) - - def __str__(self): - return self.formatted - - def __repr__(self): - return "%s: %s" %(self.__class__.__name__, self.formatted) - - def warn(self): - # XXX do this better - sys.stderr.write("Warning: unhandled %r\n" % (self,)) - - -NO_ENDMARKER_WANTED = object() - -class Channel(object): - """Communication channel between two possibly remote threads of code. """ - RemoteError = RemoteError - - def __init__(self, gateway, id): - assert isinstance(id, int) - self.gateway = gateway - self.id = id - self._items = queue.Queue() - self._closed = False - self._receiveclosed = threading.Event() - self._remoteerrors = [] - - def setcallback(self, callback, endmarker=NO_ENDMARKER_WANTED): - # we first execute the callback on all already received - # items. We need to hold the receivelock to prevent - # race conditions with newly arriving items. - # after having cleared the queue we register - # the callback only if the channel is not closed already. 
- _callbacks = self.gateway._channelfactory._callbacks - _receivelock = self.gateway._receivelock - _receivelock.acquire() - try: - if self._items is None: - raise IOError("%r has callback already registered" %(self,)) - items = self._items - self._items = None - while 1: - try: - olditem = items.get(block=False) - except queue.Empty: - if not (self._closed or self._receiveclosed.isSet()): - _callbacks[self.id] = (callback, endmarker) - break - else: - if olditem is ENDMARKER: - items.put(olditem) # for other receivers - if endmarker is not NO_ENDMARKER_WANTED: - callback(endmarker) - break - else: - callback(olditem) - finally: - _receivelock.release() - - def __repr__(self): - flag = self.isclosed() and "closed" or "open" - return "" % (self.id, flag) - - def __del__(self): - if self.gateway is None: # can be None in tests - return - self.gateway._trace("Channel(%d).__del__" % self.id) - # no multithreading issues here, because we have the last ref to 'self' - if self._closed: - # state transition "closed" --> "deleted" - for error in self._remoteerrors: - error.warn() - elif self._receiveclosed.isSet(): - # state transition "sendonly" --> "deleted" - # the remote channel is already in "deleted" state, nothing to do - pass - else: - # state transition "opened" --> "deleted" - if self._items is None: # has_callback - Msg = Message.CHANNEL_LAST_MESSAGE - else: - Msg = Message.CHANNEL_CLOSE - self.gateway._send(Msg(self.id)) - - def _getremoteerror(self): - try: - return self._remoteerrors.pop(0) - except IndexError: - return None - - # - # public API for channel objects - # - def isclosed(self): - """ return True if the channel is closed. A closed - channel may still hold items. - """ - return self._closed - - def makefile(self, mode='w', proxyclose=False): - """ return a file-like object. - mode: 'w' for writes, 'r' for reads - proxyclose: if true file.close() will - trigger a channel.close() call. - """ - if mode == "w": - return ChannelFileWrite(channel=self, proxyclose=proxyclose) - elif mode == "r": - return ChannelFileRead(channel=self, proxyclose=proxyclose) - raise ValueError("mode %r not availabe" %(mode,)) - - def close(self, error=None): - """ close down this channel on both sides. """ - if not self._closed: - # state transition "opened/sendonly" --> "closed" - # threads warning: the channel might be closed under our feet, - # but it's never damaging to send too many CHANNEL_CLOSE messages - put = self.gateway._send - if error is not None: - put(Message.CHANNEL_CLOSE_ERROR(self.id, error)) - else: - put(Message.CHANNEL_CLOSE(self.id)) - if isinstance(error, RemoteError): - self._remoteerrors.append(error) - self._closed = True # --> "closed" - self._receiveclosed.set() - queue = self._items - if queue is not None: - queue.put(ENDMARKER) - self.gateway._channelfactory._no_longer_opened(self.id) - - def waitclose(self, timeout=None): - """ wait until this channel is closed (or the remote side - otherwise signalled that no more data was being sent). - The channel may still hold receiveable items, but not receive - more. waitclose() reraises exceptions from executing code on - the other side as channel.RemoteErrors containing a a textual - representation of the remote traceback. 
- """ - self._receiveclosed.wait(timeout=timeout) # wait for non-"opened" state - if not self._receiveclosed.isSet(): - raise IOError("Timeout") - error = self._getremoteerror() - if error: - raise error - - def send(self, item): - """sends the given item to the other side of the channel, - possibly blocking if the sender queue is full. - Note that an item needs to be marshallable. - """ - if self.isclosed(): - raise IOError("cannot send to %r" %(self,)) - if isinstance(item, Channel): - data = Message.CHANNEL_NEW(self.id, item.id) - else: - data = Message.CHANNEL_DATA(self.id, item) - self.gateway._send(data) - - def receive(self): - """receives an item that was sent from the other side, - possibly blocking if there is none. - Note that exceptions from the other side will be - reraised as channel.RemoteError exceptions containing - a textual representation of the remote traceback. - """ - queue = self._items - if queue is None: - raise IOError("calling receive() on channel with receiver callback") - x = queue.get() - if x is ENDMARKER: - queue.put(x) # for other receivers - raise self._getremoteerror() or EOFError() - else: - return x - - def __iter__(self): - return self - - def next(self): - try: - return self.receive() - except EOFError: - raise StopIteration - __next__ = next - -ENDMARKER = object() - -class ChannelFactory(object): - RemoteError = RemoteError - - def __init__(self, gateway, startcount=1): - self._channels = weakref.WeakValueDictionary() - self._callbacks = {} - self._writelock = threading.Lock() - self.gateway = gateway - self.count = startcount - self.finished = False - - def new(self, id=None): - """ create a new Channel with 'id' (or create new id if None). """ - self._writelock.acquire() - try: - if self.finished: - raise IOError("connexion already closed: %s" % (self.gateway,)) - if id is None: - id = self.count - self.count += 2 - channel = Channel(self.gateway, id) - self._channels[id] = channel - return channel - finally: - self._writelock.release() - - def channels(self): - return list(self._channels.values()) - - # - # internal methods, called from the receiver thread - # - def _no_longer_opened(self, id): - try: - del self._channels[id] - except KeyError: - pass - try: - callback, endmarker = self._callbacks.pop(id) - except KeyError: - pass - else: - if endmarker is not NO_ENDMARKER_WANTED: - callback(endmarker) - - def _local_close(self, id, remoteerror=None, sendonly=False): - channel = self._channels.get(id) - if channel is None: - # channel already in "deleted" state - if remoteerror: - remoteerror.warn() - else: - # state transition to "closed" state - if remoteerror: - channel._remoteerrors.append(remoteerror) - if not sendonly: # otherwise #--> "sendonly" - channel._closed = True # --> "closed" - channel._receiveclosed.set() - queue = channel._items - if queue is not None: - queue.put(ENDMARKER) - self._no_longer_opened(id) - - def _local_receive(self, id, data): - # executes in receiver thread - try: - callback, endmarker = self._callbacks[id] - except KeyError: - channel = self._channels.get(id) - queue = channel and channel._items - if queue is None: - pass # drop data - else: - queue.put(data) - else: - callback(data) # even if channel may be already closed - - def _finished_receiving(self): - self._writelock.acquire() - try: - self.finished = True - finally: - self._writelock.release() - for id in list(self._channels): - self._local_close(id, sendonly=True) - for id in list(self._callbacks): - self._no_longer_opened(id) - -class 
ChannelFile(object): - def __init__(self, channel, proxyclose=True): - self.channel = channel - self._proxyclose = proxyclose - - def close(self): - if self._proxyclose: - self.channel.close() - - def __repr__(self): - state = self.channel.isclosed() and 'closed' or 'open' - return '' %(self.channel.id, state) - -class ChannelFileWrite(ChannelFile): - def write(self, out): - self.channel.send(out) - - def flush(self): - pass - -class ChannelFileRead(ChannelFile): - def __init__(self, channel, proxyclose=True): - super(ChannelFileRead, self).__init__(channel, proxyclose) - self._buffer = "" - - def read(self, n): - while len(self._buffer) < n: - try: - self._buffer += self.channel.receive() - except EOFError: - self.close() - break - ret = self._buffer[:n] - self._buffer = self._buffer[n:] - return ret - - def readline(self): - i = self._buffer.find("\n") - if i != -1: - return self.read(i+1) - line = self.read(len(self._buffer)+1) - while line and line[-1] != "\n": - c = self.read(1) - if not c: - break - line += c - return line - -class BaseGateway(object): - exc_info = sys.exc_info - - class _StopExecLoop(Exception): - pass - - def __init__(self, io, _startcount=2): - """ initialize core gateway, using the given inputoutput object. - """ - self._io = io - self._channelfactory = ChannelFactory(self, _startcount) - self._receivelock = threading.RLock() - - def _initreceive(self): - self._receiverthread = threading.Thread(name="receiver", - target=self._thread_receiver) - self._receiverthread.setDaemon(1) - self._receiverthread.start() - - def _trace(self, msg): - if debug: - try: - debug.write(unicode(msg) + "\n") - debug.flush() - except sysex: - raise - except: - sys.stderr.write("exception during tracing\n") - - def _thread_receiver(self): - """ thread to read and handle Messages half-sync-half-async. """ - self._trace("starting to receive") - try: - while 1: - try: - msg = Message.readfrom(self._io) - self._trace("received <- %r" % msg) - _receivelock = self._receivelock - _receivelock.acquire() - try: - msg.received(self) - finally: - _receivelock.release() - except sysex: - break - except EOFError: - break - except: - self._trace(geterrortext(self.exc_info())) - break - finally: - # XXX we need to signal fatal error states to - # channels/callbacks, particularly ones - # where the other side just died. - self._stopexec() - try: - self._stopsend() - except IOError: - self._trace('IOError on _stopsend()') - self._channelfactory._finished_receiving() - if threading: # might be None during shutdown/finalization - self._trace('leaving %r' % threading.currentThread()) - - def _send(self, msg): - if msg is None: - self._io.close_write() - else: - try: - msg.writeto(self._io) - except: - excinfo = self.exc_info() - self._trace(geterrortext(excinfo)) - else: - self._trace('sent -> %r' % msg) - - def _stopsend(self): - self._send(None) - - def _stopexec(self): - pass - - def _local_schedulexec(self, channel, sourcetask): - channel.close("execution disallowed") - - # _____________________________________________________________________ - # - # High Level Interface - # _____________________________________________________________________ - # - def newchannel(self): - """ return new channel object. """ - return self._channelfactory.new() - - def join(self, joinexec=True): - """ Wait for all IO (and by default all execution activity) - to stop. the joinexec parameter is obsolete. 
- """ - current = threading.currentThread() - if self._receiverthread.isAlive(): - self._trace("joining receiver thread") - self._receiverthread.join() - -class SlaveGateway(BaseGateway): - def _stopexec(self): - self._execqueue.put(None) - - def _local_schedulexec(self, channel, sourcetask): - self._execqueue.put((channel, sourcetask)) - - def serve(self, joining=True): - self._execqueue = queue.Queue() - self._initreceive() - try: - while 1: - item = self._execqueue.get() - if item is None: - self._stopsend() - break - try: - self.executetask(item) - except self._StopExecLoop: - break - finally: - self._trace("serve") - if joining: - self.join() - - def executetask(self, item): - """ execute channel/source items. """ - channel, source = item - try: - loc = { 'channel' : channel, '__name__': '__channelexec__'} - #open("task.py", 'w').write(source) - self._trace("execution starts: %s" % repr(source)[:50]) - try: - co = compile(source+'\n', '', 'exec') - do_exec(co, loc) - finally: - self._trace("execution finished") - except sysex: - pass - except self._StopExecLoop: - channel.close() - raise - except: - excinfo = self.exc_info() - self._trace("got exception %s" % excinfo[1]) - errortext = geterrortext(excinfo) - channel.close(errortext) - else: - channel.close() - --- a/example/execnet/redirect_remote_output.py +++ b/example/execnet/redirect_remote_output.py @@ -10,7 +10,7 @@ showcasing features of the channel objec import py -gw = py.execnet.PopenGateway() +gw = execnet.PopenGateway() outchan = gw.remote_exec(""" import sys --- a/py/execnet/script/socketserverservice.py +++ /dev/null @@ -1,91 +0,0 @@ -""" -A windows service wrapper for the py.execnet socketserver. - -To use, run: - python socketserverservice.py register - net start ExecNetSocketServer -""" - -import sys -import os -import time -import win32serviceutil -import win32service -import win32event -import win32evtlogutil -import servicemanager -import threading -import socketserver - - -appname = 'ExecNetSocketServer' - - -class SocketServerService(win32serviceutil.ServiceFramework): - _svc_name_ = appname - _svc_display_name_ = "%s" % appname - _svc_deps_ = ["EventLog"] - def __init__(self, args): - # The exe-file has messages for the Event Log Viewer. - # Register the exe-file as event source. - # - # Probably it would be better if this is done at installation time, - # so that it also could be removed if the service is uninstalled. - # Unfortunately it cannot be done in the 'if __name__ == "__main__"' - # block below, because the 'frozen' exe-file does not run this code. - # - win32evtlogutil.AddSourceToRegistry(self._svc_display_name_, - servicemanager.__file__, - "Application") - win32serviceutil.ServiceFramework.__init__(self, args) - self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) - self.WAIT_TIME = 1000 # in milliseconds - - - def SvcStop(self): - self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) - win32event.SetEvent(self.hWaitStop) - - - def SvcDoRun(self): - # Redirect stdout and stderr to prevent "IOError: [Errno 9] - # Bad file descriptor". Windows services don't have functional - # output streams. - sys.stdout = sys.stderr = open('nul', 'w') - - # Write a 'started' event to the event log... 
- win32evtlogutil.ReportEvent(self._svc_display_name_, - servicemanager.PYS_SERVICE_STARTED, - 0, # category - servicemanager.EVENTLOG_INFORMATION_TYPE, - (self._svc_name_, '')) - print("Begin: %s" % (self._svc_display_name_)) - - hostport = ':8888' - print('Starting py.execnet SocketServer on %s' % hostport) - serversock = socketserver.bind_and_listen(hostport) - thread = threading.Thread(target=socketserver.startserver, - args=(serversock,), - kwargs={'loop':True}) - thread.setDaemon(True) - thread.start() - - # wait to be stopped or self.WAIT_TIME to pass - while True: - result = win32event.WaitForSingleObject(self.hWaitStop, - self.WAIT_TIME) - if result == win32event.WAIT_OBJECT_0: - break - - # write a 'stopped' event to the event log. - win32evtlogutil.ReportEvent(self._svc_display_name_, - servicemanager.PYS_SERVICE_STOPPED, - 0, # category - servicemanager.EVENTLOG_INFORMATION_TYPE, - (self._svc_name_, '')) - print("End: %s" % appname) - - -if __name__ == '__main__': - # Note that this code will not be run in the 'frozen' exe-file!!! - win32serviceutil.HandleCommandLine(SocketServerService) --- a/py/execnet/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -""" ad-hoc networking mechanism """ --- a/py/execnet/script/quitserver.py +++ /dev/null @@ -1,16 +0,0 @@ -""" - - send a "quit" signal to a remote server - -""" - -import sys -import socket - -hostport = sys.argv[1] -host, port = hostport.split(':') -hostport = (host, int(port)) - -sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) -sock.connect(hostport) -sock.sendall('"raise KeyboardInterrupt"\n') --- a/example/execnet/sysinfo.py +++ b/example/execnet/sysinfo.py @@ -95,7 +95,7 @@ def error(*args): def getinfo(sshname, ssh_config=None, loginfo=sys.stdout): debug("connecting to", sshname) try: - gw = py.execnet.SshGateway(sshname, ssh_config=ssh_config) + gw = execnet.SshGateway(sshname, ssh_config=ssh_config) except IOError: error("could not get sshagteway", sshname) else: --- a/py/test/plugin/pytest_execnetcleanup.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -cleanup execnet gateways during test function runs. 
-""" -import py - -pytest_plugins = "xfail" - -def pytest_configure(config): - config.pluginmanager.register(Execnetcleanup()) - -class Execnetcleanup: - _gateways = None - def __init__(self, debug=False): - self._debug = debug - - def pyexecnet_gateway_init(self, gateway): - if self._gateways is not None: - self._gateways.append(gateway) - - def pyexecnet_gateway_exit(self, gateway): - if self._gateways is not None: - self._gateways.remove(gateway) - - def pytest_sessionstart(self, session): - self._gateways = [] - - def pytest_sessionfinish(self, session, exitstatus): - l = [] - for gw in self._gateways: - gw.exit() - l.append(gw) - #for gw in l: - # gw.join() - - def pytest_pyfunc_call(self, __multicall__, pyfuncitem): - if self._gateways is not None: - gateways = self._gateways[:] - res = __multicall__.execute() - while len(self._gateways) > len(gateways): - self._gateways[-1].exit() - return res --- a/py/execnet/rsync_remote.py +++ /dev/null @@ -1,92 +0,0 @@ -def f(): - import os, stat, shutil - try: - from hashlib import md5 - except ImportError: - from md5 import md5 - destdir, options = channel.receive() - modifiedfiles = [] - - def remove(path): - assert path.startswith(destdir) - try: - os.unlink(path) - except OSError: - # assume it's a dir - shutil.rmtree(path) - - def receive_directory_structure(path, relcomponents): - try: - st = os.lstat(path) - except OSError: - st = None - msg = channel.receive() - if isinstance(msg, list): - if st and not stat.S_ISDIR(st.st_mode): - os.unlink(path) - st = None - if not st: - os.makedirs(path) - entrynames = {} - for entryname in msg: - receive_directory_structure(os.path.join(path, entryname), - relcomponents + [entryname]) - entrynames[entryname] = True - if options.get('delete'): - for othername in os.listdir(path): - if othername not in entrynames: - otherpath = os.path.join(path, othername) - remove(otherpath) - elif msg is not None: - checksum = None - if st: - if stat.S_ISREG(st.st_mode): - msg_mtime, msg_size = msg - if msg_size != st.st_size: - pass - elif msg_mtime != st.st_mtime: - f = open(path, 'rb') - checksum = md5(f.read()).digest() - f.close() - else: - return # already fine - else: - remove(path) - channel.send(("send", (relcomponents, checksum))) - modifiedfiles.append((path, msg)) - receive_directory_structure(destdir, []) - - STRICT_CHECK = False # seems most useful this way for py.test - channel.send(("list_done", None)) - - for path, (time, size) in modifiedfiles: - data = channel.receive() - channel.send(("ack", path[len(destdir) + 1:])) - if data is not None: - if STRICT_CHECK and len(data) != size: - raise IOError('file modified during rsync: %r' % (path,)) - f = open(path, 'wb') - f.write(data) - f.close() - try: - os.utime(path, (time, time)) - except OSError: - pass - del data - channel.send(("links", None)) - - msg = channel.receive() - while msg is not 42: - # we get symlink - _type, relpath, linkpoint = msg - assert _type == "link" - path = os.path.join(destdir, relpath) - try: - remove(path) - except OSError: - pass - - os.symlink(os.path.join(destdir, linkpoint), path) - msg = channel.receive() - channel.send(("done", None)) - --- a/example/funcarg/mysetup2/conftest.py +++ b/example/funcarg/mysetup2/conftest.py @@ -20,5 +20,5 @@ class MySetup: host = self.config.option.ssh if host is None: py.test.skip("specify ssh host with --ssh") - return py.execnet.SshGateway(host) + return execnet.SshGateway(host) --- a/contrib/sysinfo.py +++ b/contrib/sysinfo.py @@ -95,7 +95,7 @@ def error(*args): def 
getinfo(sshname, ssh_config=None, loginfo=sys.stdout): debug("connecting to", sshname) try: - gw = py.execnet.SshGateway(sshname, ssh_config=ssh_config) + gw = execnet.SshGateway(sshname, ssh_config=ssh_config) except IOError: error("could not get sshagteway", sshname) else: --- a/doc/execnet.txt +++ b/doc/execnet.txt @@ -2,263 +2,11 @@ py.execnet: *elastic* distributed programming ============================================================================== -``execnet`` helps you to: +Since pylib 1.1 "py.execnet" is separated out of hte lib and now +available through the standalone `execnet standalone package`_. -* ad-hoc instantiate local or remote Python Processes -* send code for execution in one or many processes -* send and receive data between processes through channels +If you have usages of the "py.execnet.*" 1.0 API you can likely +rename all occurences of the string ``py.execnet.`` with the +string ``execnet.``. -One of it's unique features is that it uses a **zero-install** -technique: no manual installation steps are required on -remote places, only a basic working Python interpreter -and some input/output connection to it. - -There is a `EuroPython2009 talk`_ from July 2009 with -examples and some pictures. - -.. contents:: - :local: - :depth: 2 - -.. _`EuroPython2009 talk`: http://codespeak.net/download/py/ep2009-execnet.pdf - -Gateways: immediately spawn local or remote process -=================================================== - -In order to send code to a remote place or a subprocess -you need to instantiate a so-called Gateway object. -There are currently three Gateway classes: - -* :api:`py.execnet.PopenGateway` to open a subprocess - on the local machine. Useful for making use - of multiple processors to to contain code execution - in a separated environment. - -* :api:`py.execnet.SshGateway` to connect to - a remote ssh server and distribute execution to it. - -* :api:`py.execnet.SocketGateway` a way to connect to - a remote Socket based server. *Note* that this method - requires a manually started - :source:py/execnet/script/socketserver.py - script. You can run this "server script" without - having the py lib installed on the remote system - and you can setup it up as permanent service. - - -remote_exec: execute source code remotely -=================================================== - -All gateways offer remote code execution via this high level function:: - - def remote_exec(source): - """return channel object for communicating with the asynchronously - executing 'source' code which will have a corresponding 'channel' - object in its executing namespace.""" - -With `remote_exec` you send source code to the other -side and get both a local and a remote Channel_ object, -which you can use to have the local and remote site -communicate data in a structured way. Here is -an example for reading the PID:: - - >>> import py - >>> gw = py.execnet.PopenGateway() - >>> channel = gw.remote_exec(""" - ... import os - ... channel.send(os.getpid()) - ... """) - >>> remote_pid = channel.receive() - >>> remote_pid != py.std.os.getpid() - True - -.. _`Channel`: -.. _`channel-api`: -.. _`exchange data`: - -Channels: bidirectionally exchange data between hosts -======================================================= - -A channel object allows to send and receive data between -two asynchronously running programs. When calling -`remote_exec` you will get a channel object back and -the code fragment running on the other side will -see a channel object in its global namespace. 
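A minimal round-trip sketch, using only the calls documented on this page
(``PopenGateway``, ``remote_exec``, ``channel.send`` and ``channel.receive``);
per the porting note above, the same lines should also work with the
standalone package after renaming ``py.execnet.`` to ``execnet.``::

    import py

    gw = py.execnet.PopenGateway()        # spawn a local python subprocess
    channel = gw.remote_exec("""
        # executes in the child process; a 'channel' object is injected here
        x = channel.receive()             # block until the parent sends a value
        channel.send(x * 2)               # answer with a derived value
    """)
    channel.send(21)                      # parent side: send input ...
    assert channel.receive() == 42        # ... and receive the computed result
    gw.exit()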
- -Here is the interface of channel objects:: - - # - # API for sending and receiving anonymous values - # - channel.send(item): - sends the given item to the other side of the channel, - possibly blocking if the sender queue is full. - Note that items need to be marshallable (all basic - python types are). - - channel.receive(): - receives an item that was sent from the other side, - possibly blocking if there is none. - Note that exceptions from the other side will be - reraised as gateway.RemoteError exceptions containing - a textual representation of the remote traceback. - - channel.waitclose(timeout=None): - wait until this channel is closed. Note that a closed - channel may still hold items that will be received or - send. Note that exceptions from the other side will be - reraised as gateway.RemoteError exceptions containing - a textual representation of the remote traceback. - - channel.close(): - close this channel on both the local and the remote side. - A remote side blocking on receive() on this channel - will get woken up and see an EOFError exception. - - -.. _xspec: - - -XSpec: string specification for gateway type and configuration -=============================================================== - -``py.execnet`` supports a simple extensible format for -specifying and configuring Gateways for remote execution. -You can use a string specification to instantiate a new gateway, -for example a new SshGateway:: - - gateway = py.execnet.makegateway("ssh=myhost") - -Let's look at some examples for valid specifications. -Specification for an ssh connection to `wyvern`, running on python2.4 in the (newly created) 'mycache' subdirectory:: - - ssh=wyvern//python=python2.4//chdir=mycache - -Specification of a python2.5 subprocess; with a low CPU priority ("nice" level). Current dir will be the current dir of the instantiator (that's true for all 'popen' specifications unless they specify 'chdir'):: - - popen//python=2.5//nice=20 - -Specification of a Python Socket server process that listens on 192.168.1.4:8888; current dir will be the 'pyexecnet-cache' sub directory which is used a default for all remote processes:: - - socket=192.168.1.4:8888 - -More generally, a specification string has this general format:: - - key1=value1//key2=value2//key3=value3 - -If you omit a value, a boolean true value is assumed. 
Currently -the following key/values are supported: - -* ``popen`` for a PopenGateway -* ``ssh=host`` for a SshGateway -* ``socket=address:port`` for a SocketGateway -* ``python=executable`` for specifying Python executables -* ``chdir=path`` change remote working dir to given relative or absolute path -* ``nice=value`` decrease remote nice level if platforms supports it - - -Examples of py.execnet usage -=============================================================== - -Compare cwd() of Popen Gateways ----------------------------------------- - -A PopenGateway has the same working directory as the instantiatior:: - - >>> import py, os - >>> gw = py.execnet.PopenGateway() - >>> ch = gw.remote_exec("import os; channel.send(os.getcwd())") - >>> res = ch.receive() - >>> assert res == os.getcwd() - >>> gw.exit() - -Synchronously receive results from two sub processes ------------------------------------------------------ - -Use MultiChannels for receiving multiple results from remote code:: - - >>> import py - >>> ch1 = py.execnet.PopenGateway().remote_exec("channel.send(1)") - >>> ch2 = py.execnet.PopenGateway().remote_exec("channel.send(2)") - >>> mch = py.execnet.MultiChannel([ch1, ch2]) - >>> l = mch.receive_each() - >>> assert len(l) == 2 - >>> assert 1 in l - >>> assert 2 in l - -Asynchronously receive results from two sub processes ------------------------------------------------------ - -Use ``MultiChannel.make_receive_queue()`` for asynchronously receiving -multiple results from remote code. This standard Queue provides -``(channel, result)`` tuples which allows to determine where -a result comes from:: - - >>> import py - >>> ch1 = py.execnet.PopenGateway().remote_exec("channel.send(1)") - >>> ch2 = py.execnet.PopenGateway().remote_exec("channel.send(2)") - >>> mch = py.execnet.MultiChannel([ch1, ch2]) - >>> queue = mch.make_receive_queue() - >>> chan1, res1 = queue.get() # you may also specify a timeout - >>> chan2, res2 = queue.get() - >>> res1 + res2 - 3 - >>> assert chan1 in (ch1, ch2) - >>> assert chan2 in (ch1, ch2) - >>> assert chan1 != chan2 - -Receive file contents from remote SSH account ------------------------------------------------------ - -Here is a small program that you can use to retrieve -contents of remote files:: - - import py - # open a gateway to a fresh child process - gw = py.execnet.SshGateway('codespeak.net') - channel = gw.remote_exec(""" - for fn in channel: - f = open(fn, 'rb') - channel.send(f.read()) - f.close() - """) - - for fn in somefilelist: - channel.send(fn) - content = channel.receive() - # process content - - # later you can exit / close down the gateway - gw.exit() - - -Instantiate a socket server in a new subprocess ------------------------------------------------------ - -The following example opens a PopenGateway, i.e. a python -child process, and starts a socket server within that process -and then opens a second gateway to the freshly started -socketserver:: - - import py - - popengw = py.execnet.PopenGateway() - socketgw = py.execnet.SocketGateway.new_remote(popengw, ("127.0.0.1", 0)) - - print socketgw._rinfo() # print some info about the remote environment - - -Sending a module / checking if run through remote_exec --------------------------------------------------------------- - -You can pass a module object to ``remote_exec`` in which case -its source code will be sent. No dependencies will be transferred -so the module must be self-contained or only use modules that are -installed on the "other" side. 
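A hedged sketch of such a call; ``mymodule`` is a placeholder name, not
something defined in this documentation, and stands for any self-contained
module importable on the sending side::

    import py
    import mymodule                       # placeholder: a self-contained module

    gw = py.execnet.PopenGateway()
    channel = gw.remote_exec(mymodule)    # the module's source code is sent over
    # if the module cooperates (see the __name__ check below) it can talk
    # back over 'channel'; here we simply wait for the remote side to finish
    channel.waitclose()
    gw.exit()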
Module code can detect if it is -running in a remote_exec situation by checking for the special -``__name__`` attribute like this:: - - if __name__ == '__channelexec__': - # ... call module functions ... - - +.. _`execnet standalone package`: http://codespeak.net/execnet From commits-noreply at bitbucket.org Mon Oct 5 01:44:01 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 4 Oct 2009 23:44:01 +0000 (UTC) Subject: [py-svn] apipkg commit f1e60132ead5: small refinements to readme Message-ID: <20091004234401.F065F9A7A1@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User holger krekel # Date 1254699834 -7200 # Node ID f1e60132ead5c39e79d356d82ebf574de83dd04b # Parent d280c8cbde69bfb30c2b2cc878bf256d4c96ed02 small refinements to readme --- a/readme.txt +++ b/readme.txt @@ -24,10 +24,11 @@ and exports two objects imported from di } } -The package is initialized with a dictionary as namespace -whose values may be further dictionaries. If the value -is a string it specifies an import location. On accessing -the according attribute the import will be performed:: +The package is initialized with a dictionary as namespace. +Namespace dictionaries contain ``name: value`` mappings +where the value may be another namespace dictionary or +a string specifying an import location. On accessing +the according attribute an import will be performed:: >>> import mypkg >>> mypkg.path @@ -37,9 +38,9 @@ the according attribute the import will >>> mypkg.sub.Class2 # '_mypkg.othermodule' gets imported now -Both classes are lazy loaded and no imports apart from -the root ``import mypkg`` are required. - +The ``mypkg.sub`` namespace and both its classes are +lazy loaded and no imports apart from the root +``import mypkg`` is required. Including apipkg in your package -------------------------------------- From commits-noreply at bitbucket.org Mon Oct 5 01:45:13 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 4 Oct 2009 23:45:13 +0000 (UTC) Subject: [py-svn] py-trunk commit 34ced857eecb: * use the MIT license for the py lib Message-ID: <20091004234513.7CADF9A7A1@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1254525064 -7200 # Node ID 34ced857eecbd95944541ef98f9fdaf3f457dd35 # Parent 24cf00dd82ff0ddb529b1545fbccd8d2c3e2ae95 * use the MIT license for the py lib * bump version to prospective 1.1.0b1 * strike some unused code from initpkg --- a/LICENSE +++ b/LICENSE @@ -1,165 +1,19 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. 
- - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. 
If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. --- a/doc/faq.txt +++ b/doc/faq.txt @@ -46,46 +46,6 @@ have no counterpart in nose_. .. _nose: http://somethingaboutorange.com/mrl/projects/nose/0.11.1/ .. _features: test/features.html -.. _whygpl: - -Why did you choose a GPL-style license? ----------------------------------------- - -Older versions of the py lib and py.test (up until 1.0.x) -were licensed under the MIT license. Starting -with the 1.1 series Holger Krekel - being the main maintainer -and developer since several years - decided to go for -a GPL-style license mainly for these reasons: - -1. increase likelyness of flow-back, contributions and publicity. - -2. make use of the FSF_ efforts which produced a consistent and interoperable legal framework. - -3. Potentially get some money from dual-licensing to companies. - -Developers want to co-operate no matter what context they -are in, commercial, free, whatever. BSD-licenses sound like -a fit because they minimize the need for checking for -constraints from the company or legal department. They allow -to use and modify software for whatever purpose. - -However, developers wanting to produce free software for a living -often need to connect to a sustainable revenue system. When -releasing software for public use they seek means, some security -on getting something back: Contributions, recognition or money. -The GPL license tries to foster a universe of free software and -force proprietary players to contribute back. - -The py lib choose the Lesser GPL. It strikes a balance because it -allows the code to interact in proprietary contexts and increases -likelyness of flow backs. - -If you do have or get actual practical issues regarding -licensing please get in contact_. - -.. _fsf: http://www.fsf.org -.. _contact: contact.html - What's all this "magic" with py.test? 
---------------------------------------- --- a/py/__init__.py +++ b/py/__init__.py @@ -1,4 +1,3 @@ - # -*- coding: utf-8 -*- """ advanced testing and development support library: @@ -17,14 +16,14 @@ For questions please check out http://py (c) Holger Krekel and others, 2009 """ from py.initpkg import initpkg -trunk = "trunk" +trunk = "1.1.0b1" version = trunk or "1.0.x" del trunk initpkg(__name__, - description = "py.test and pylib: advanced testing tool and networking lib", + description = "py.test and pylib: rapid testing and high-level path/code objects.", version = version, url = "http://pylib.org", license = "MIT license", @@ -33,7 +32,7 @@ initpkg(__name__, author_email = "holger at merlinux.eu, py-dev at codespeak.net", long_description = globals()['__doc__'], classifiers = [ - "Development Status :: 5 - Production/Stable", + "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: POSIX", --- a/AUTHORS +++ b/AUTHORS @@ -8,3 +8,15 @@ Armin Rigo, arigo at tunes org Maciek Fijalkowski, fijal at genesilico pl Brian Dorsey, briandorsey at gmail com merlinux GmbH, Germany, office at merlinux eu + +Contributors include:: + +Chris Lamb +Harald Armin Massa +Ralf Schmitt +Martijn Faassen +Ian Bicking +Jan Balster +Grig Gheorghiu +Bob Ippolito +Christian Tismer --- a/setup.py +++ b/setup.py @@ -24,13 +24,13 @@ For questions please check out http://py (c) Holger Krekel and others, 2009 """ -trunk = 'trunk' +trunk = None def main(): setup( name='py', - description='py.test and pylib: advanced testing tool and networking lib', + description='py.test and pylib: rapid testing and high-level path/code objects.', long_description = long_description, - version= trunk or 'trunk', + version= trunk or '1.1.0b1', url='http://pylib.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], @@ -45,7 +45,7 @@ def main(): 'py.svnwcrevert = py.cmdline:pysvnwcrevert', 'py.test = py.cmdline:pytest', 'py.which = py.cmdline:pywhich']}, - classifiers=['Development Status :: 5 - Production/Stable', + classifiers=['Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: POSIX', --- a/doc/changelog.txt +++ b/doc/changelog.txt @@ -1,6 +1,8 @@ Changes between 1.0.x and 'trunk' ===================================== +* use MIT license for pylib, add some contributors + * remove py.execnet code and substitute all usages with 'execnet' proper * fix issue50 - cached_setup now caches more to expectations From commits-noreply at bitbucket.org Mon Oct 5 01:45:15 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 4 Oct 2009 23:45:15 +0000 (UTC) Subject: [py-svn] py-trunk commit c09184574eca: * remove unused py._thread namespace, rewrite the one usage Message-ID: <20091004234515.C64619A7A2@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1254592668 -7200 # Node ID c09184574eca41143f883ea1f091cb3d4b54bb7b # Parent de4155a6b4e06fcbb18513bfe2f06db48c4856c5 * remove unused py._thread namespace, rewrite the one usage * remove unused py/test/web directory --- a/_py/thread/pool.py +++ /dev/null @@ -1,208 +0,0 @@ -import threading -import time -import sys -import py - -queue = py.builtin._tryimport('queue', 'Queue') - -ERRORMARKER = object() - -class Reply(object): - """ reply instances provide access to the result - of a 
function execution that got dispatched - through WorkerPool.dispatch() - """ - _excinfo = None - def __init__(self, task): - self.task = task - self._queue = queue.Queue() - - def _set(self, result): - self._queue.put(result) - - def _setexcinfo(self, excinfo): - self._excinfo = excinfo - self._queue.put(ERRORMARKER) - - def _get_with_timeout(self, timeout): - # taken from python2.3's Queue.get() - # we want to run on python2.2 here - delay = 0.0005 # 500 us -> initial delay of 1 ms - endtime = time.time() + timeout - while 1: - try: - return self._queue.get_nowait() - except queue.Empty: - remaining = endtime - time.time() - if remaining <= 0: #time is over and no element arrived - raise IOError("timeout waiting for task %r" %(self.task,)) - delay = min(delay * 2, remaining, .05) - time.sleep(delay) #reduce CPU usage by using a sleep - - def get(self, timeout=None): - """ get the result object from an asynchronous function execution. - if the function execution raised an exception, - then calling get() will reraise that exception - including its traceback. - """ - if self._queue is None: - raise EOFError("reply has already been delivered") - if timeout is not None: - result = self._get_with_timeout(timeout) - else: - result = self._queue.get() - if result is ERRORMARKER: - self._queue = None - excinfo = self._excinfo - py.builtin._reraise(excinfo[0], excinfo[1], excinfo[2]) - return result - -class WorkerThread(threading.Thread): - def __init__(self, pool): - threading.Thread.__init__(self) - self._queue = queue.Queue() - self._pool = pool - self.setDaemon(1) - - def _run_once(self): - reply = self._queue.get() - if reply is SystemExit: - return False - assert self not in self._pool._ready - task = reply.task - try: - func, args, kwargs = task - result = func(*args, **kwargs) - except (SystemExit, KeyboardInterrupt): - return False - except: - reply._setexcinfo(sys.exc_info()) - else: - reply._set(result) - # at this point, reply, task and all other local variables go away - return True - - def run(self): - try: - while self._run_once(): - self._pool._ready[self] = True - finally: - del self._pool._alive[self] - try: - del self._pool._ready[self] - except KeyError: - pass - - def send(self, task): - reply = Reply(task) - self._queue.put(reply) - return reply - - def stop(self): - self._queue.put(SystemExit) - -class WorkerPool(object): - """ A WorkerPool allows to dispatch function executions - to threads. Each Worker Thread is reused for multiple - function executions. The dispatching operation - takes care to create and dispatch to existing - threads. - - You need to call shutdown() to signal - the WorkerThreads to terminate and join() - in order to wait until all worker threads - have terminated. - """ - _shuttingdown = False - def __init__(self, maxthreads=None): - """ init WorkerPool instance which may - create up to `maxthreads` worker threads. - """ - self.maxthreads = maxthreads - self._ready = {} - self._alive = {} - - def dispatch(self, func, *args, **kwargs): - """ return Reply object for the asynchronous dispatch - of the given func(*args, **kwargs) in a - separate worker thread. - """ - if self._shuttingdown: - raise IOError("WorkerPool is already shutting down") - try: - thread, _ = self._ready.popitem() - except KeyError: # pop from empty list - if self.maxthreads and len(self._alive) >= self.maxthreads: - raise IOError("can't create more than %d threads." 
% - (self.maxthreads,)) - thread = self._newthread() - return thread.send((func, args, kwargs)) - - def _newthread(self): - thread = WorkerThread(self) - self._alive[thread] = True - thread.start() - return thread - - def shutdown(self): - """ signal all worker threads to terminate. - call join() to wait until all threads termination. - """ - if not self._shuttingdown: - self._shuttingdown = True - for t in list(self._alive): - t.stop() - - def join(self, timeout=None): - """ wait until all worker threads have terminated. """ - current = threading.currentThread() - deadline = delta = None - if timeout is not None: - deadline = time.time() + timeout - for thread in list(self._alive): - if deadline: - delta = deadline - time.time() - if delta <= 0: - raise IOError("timeout while joining threads") - thread.join(timeout=delta) - if thread.isAlive(): - raise IOError("timeout while joining threads") - -class NamedThreadPool: - def __init__(self, **kw): - self._namedthreads = {} - for name, value in kw.items(): - self.start(name, value) - - def __repr__(self): - return "" %(self._namedthreads) - - def get(self, name=None): - if name is None: - l = [] - for x in self._namedthreads.values(): - l.extend(x) - return l - else: - return self._namedthreads.get(name, []) - - def getstarted(self, name=None): - return [t for t in self.get(name) if t.isAlive()] - - def prunestopped(self, name=None): - if name is None: - for name in self.names(): - self.prunestopped(name) - else: - self._namedthreads[name] = self.getstarted(name) - - def names(self): - return self._namedthreads.keys() - - def start(self, name, func): - l = self._namedthreads.setdefault(name, []) - thread = threading.Thread(name="%s%d" % (name, len(l)), - target=func) - thread.start() - l.append(thread) - --- a/_py/thread/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# --- a/_py/test/web/exception.py +++ /dev/null @@ -1,6 +0,0 @@ -class CSSError(Exception): - """raised when there's a problem with the CSS""" - -class HTMLError(Exception): - """raised when there's a problem with the HTML""" - --- a/_py/thread/io.py +++ /dev/null @@ -1,83 +0,0 @@ - -try: - from _thread import get_ident -except ImportError: - from thread import get_ident - -class ThreadOut(object): - """ A file like object that diverts writing operations - to per-thread writefuncs. - This is a py lib internal class and not meant for outer use - or modification. - """ - def __new__(cls, obj, attrname): - """ Divert file output to per-thread writefuncs. - the given obj and attrname describe the destination - of the file. 
- """ - current = getattr(obj, attrname) - if isinstance(current, cls): - current._used += 1 - return current - self = object.__new__(cls) - self._tid2out = {} - self._used = 1 - self._oldout = getattr(obj, attrname) - self._defaultwriter = self._oldout.write - self._address = (obj, attrname) - setattr(obj, attrname, self) - return self - - def isatty(self): - # XXX - return False - - def setdefaultwriter(self, writefunc): - self._defaultwriter = writefunc - - def resetdefault(self): - self._defaultwriter = self._oldout.write - - def softspace(): - def fget(self): - return self._get()[0] - def fset(self, value): - self._get()[0] = value - return property(fget, fset, None, "software attribute") - softspace = softspace() - - def deinstall(self): - self._used -= 1 - x = self._used - if x <= 0: - obj, attrname = self._address - setattr(obj, attrname, self._oldout) - - def setwritefunc(self, writefunc, tid=None): - if tid is None: - tid = get_ident() - self._tid2out[tid] = [0, writefunc] - - def delwritefunc(self, tid=None, ignoremissing=True): - if tid is None: - tid = get_ident() - try: - del self._tid2out[tid] - except KeyError: - if not ignoremissing: - raise - - def _get(self): - tid = get_ident() - try: - return self._tid2out[tid] - except KeyError: - return getattr(self._defaultwriter, 'softspace', 0), self._defaultwriter - - def write(self, data): - softspace, out = self._get() - out(data) - - def flush(self): - pass - --- a/testing/thread/test_io.py +++ /dev/null @@ -1,72 +0,0 @@ - -import py -import sys - -WorkerPool = py._thread.WorkerPool -ThreadOut = py._thread.ThreadOut - -def test_threadout_install_deinstall(): - old = sys.stdout - out = ThreadOut(sys, 'stdout') - out.deinstall() - assert old == sys.stdout - -class TestThreadOut: - def test_threadout_one(self): - out = ThreadOut(sys, 'stdout') - try: - l = [] - out.setwritefunc(l.append) - py.builtin.print_(42,13) - x = l.pop(0) - assert x == '42' - x = l.pop(0) - assert x == ' ' - x = l.pop(0) - assert x == '13' - finally: - out.deinstall() - - def test_threadout_multi_and_default(self): - out = ThreadOut(sys, 'stdout') - try: - num = 3 - defaults = [] - def f(l): - out.setwritefunc(l.append) - sys.stdout.write(str(id(l))) - out.delwritefunc() - print(1) - out.setdefaultwriter(defaults.append) - pool = WorkerPool() - listlist = [] - for x in range(num): - l = [] - listlist.append(l) - pool.dispatch(f, l) - pool.shutdown() - for name, value in out.__dict__.items(): - sys.stderr.write("%s: %s" %(name, value)) - pool.join(2.0) - for i in range(num): - item = listlist[i] - assert item ==[str(id(item))] - assert not out._tid2out - assert defaults - expect = ['1' for x in range(num)] - defaults = [x for x in defaults if x.strip()] - assert defaults == expect - finally: - out.deinstall() - - def test_threadout_nested(self): - out1 = ThreadOut(sys, 'stdout') - try: - # we want ThreadOuts to coexist - last = sys.stdout - out = ThreadOut(sys, 'stdout') - assert last == sys.stdout - out.deinstall() - assert last == sys.stdout - finally: - out1.deinstall() --- a/_py/test/web/post_multipart.py +++ /dev/null @@ -1,58 +0,0 @@ -import httplib, mimetypes - -"""Copied from the cookbook - - see ActiveState's ASPN - http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306 -""" - -def post_multipart(host, selector, fields, files): - """ - Post fields and files to an http host as multipart/form-data. - fields is a sequence of (name, value) elements for regular form fields. 
- files is a sequence of (name, filename, value) elements for data to be - uploaded as files - - Return the server's response page. - """ - content_type, body = encode_multipart_formdata(fields, files) - h = httplib.HTTP(host) - h.putrequest('POST', selector) - h.putheader('content-type', content_type) - h.putheader('content-length', str(len(body))) - h.endheaders() - h.send(body) - errcode, errmsg, headers = h.getreply() - return h.file.read() - -def encode_multipart_formdata(fields, files): - """ - fields is a sequence of (name, value) elements for regular form fields. - files is a sequence of (name, filename, value) elements for data to be - uploaded as files - - Return (content_type, body) ready for httplib.HTTP instance - """ - BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' - CRLF = '\r\n' - L = [] - for (key, value) in fields: - L.append('--' + BOUNDARY) - L.append('Content-Disposition: form-data; name="%s"' % key) - L.append('') - L.append(value) - for (key, filename, value) in files: - L.append('--' + BOUNDARY) - L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % - (key, filename)) - L.append('Content-Type: %s' % get_content_type(filename)) - L.append('') - L.append(value) - L.append('--' + BOUNDARY + '--') - L.append('') - body = CRLF.join(L) - content_type = 'multipart/form-data; boundary=%s' % BOUNDARY - return content_type, body - -def get_content_type(filename): - return mimetypes.guess_type(filename)[0] or 'application/octet-stream' --- a/testing/thread/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# --- a/_py/test/web/webcheck.py +++ /dev/null @@ -1,41 +0,0 @@ -import py -import re -from exception import * -from post_multipart import post_multipart -#import css_checker - -def check_html(string): - """check an HTML string for wellformedness and validity""" - tempdir = py.test.ensuretemp('check_html') - filename = 'temp%s.html' % (hash(string), ) - tempfile = tempdir.join(filename) - tempfile.write(string) - ret = post_multipart('validator.w3.org', '/check', [], - [('uploaded_file', 'somehtml.html', string)]) - is_valid = get_validation_result_from_w3_html(ret) - return is_valid - -reg_validation_result = re.compile( - '<(h2|td)[^>]*class="(in)?valid"[^>]*>([^<]*)<', re.M | re.S) -def get_validation_result_from_w3_html(html): - match = reg_validation_result.search(html) - valid = match.group(1) is None - text = match.group(2).strip() - if not valid: - temp = py.test.ensuretemp('/w3_results_%s.html' % hash(html), dir=0) - temp.write(html) - raise HTMLError( - "The html is not valid. 
See the report file at '%s'" % temp) - return valid - -#def check_css(string, basepath, htmlpath='/'): -# """check the CSS of an HTML string -# -# check whether an HTML string contains CSS rels, and if so check whether -# any classes defined in the HTML actually have a matching CSS selector -# """ -# c = css_checker.css_checker(string, basepath, htmlpath) -# # raises a CSSError when failing, this is done from the tester class to -# # allow being more verbose than just 'something went wrong' -# return c.check() - --- a/setup.py +++ b/setup.py @@ -65,9 +65,7 @@ def main(): '_py.test', '_py.test.dist', '_py.test.looponfail', - '_py.test.plugin', - '_py.test.web', - '_py.thread'], + '_py.test.plugin',], package_data={'py': ['bin/_findpy.py', 'bin/env.cmd', 'bin/env.py', @@ -86,8 +84,8 @@ def main(): 'bin/win32/py.rest.cmd', 'bin/win32/py.svnwcrevert.cmd', 'bin/win32/py.test.cmd', - 'bin/win32/py.which.cmd', - 'rest/rest.sty.template']}, + 'bin/win32/py.which.cmd',], + '_py': ['rest/rest.sty.template']}, zip_safe=True, ) --- a/testing/pytest/looponfail/test_util.py +++ b/testing/pytest/looponfail/test_util.py @@ -51,14 +51,11 @@ def test_pycremoval(tmpdir): assert not pycfile.check() -def test_waitonchange(tmpdir): +def test_waitonchange(tmpdir, monkeypatch): tmp = tmpdir sd = StatRecorder([tmp]) - wp = py._thread.WorkerPool(1) - reply = wp.dispatch(sd.waitonchange, checkinterval=0.2) - py.std.time.sleep(0.05) - tmp.ensure("newfile.py") - reply.get(timeout=0.5) - wp.shutdown() - + l = [True, False] + monkeypatch.setattr(StatRecorder, 'check', lambda self: l.pop()) + sd.waitonchange(checkinterval=0.2) + assert not l --- a/testing/thread/test_pool.py +++ /dev/null @@ -1,94 +0,0 @@ - -import py -import sys -from _py.thread.pool import queue - -WorkerPool = py._thread.WorkerPool -ThreadOut = py._thread.ThreadOut - -def test_some(): - pool = WorkerPool() - q = queue.Queue() - num = 4 - - def f(i): - q.put(i) - while q.qsize(): - py.std.time.sleep(0.01) - for i in range(num): - pool.dispatch(f, i) - for i in range(num): - q.get() - assert len(pool._alive) == 4 - pool.shutdown() - # XXX I replaced the following join() with a time.sleep(1), which seems - # to fix the test on Windows, and doesn't break it on Linux... Completely - # unsure what the idea is, though, so it would be nice if someone with some - # more understanding of what happens here would either fix this better, or - # remove this comment... 
- # pool.join(timeout=1.0) - py.std.time.sleep(1) - assert len(pool._alive) == 0 - assert len(pool._ready) == 0 - -def test_get(): - pool = WorkerPool() - def f(): - return 42 - reply = pool.dispatch(f) - result = reply.get() - assert result == 42 - -def test_get_timeout(): - pool = WorkerPool() - def f(): - py.std.time.sleep(0.2) - return 42 - reply = pool.dispatch(f) - py.test.raises(IOError, "reply.get(timeout=0.01)") - -def test_get_excinfo(): - pool = WorkerPool() - def f(): - raise ValueError("42") - reply = pool.dispatch(f) - excinfo = py.test.raises(ValueError, "reply.get(1.0)") - py.test.raises(EOFError, "reply.get(1.0)") - -def test_maxthreads(): - pool = WorkerPool(maxthreads=1) - def f(): - py.std.time.sleep(0.5) - try: - pool.dispatch(f) - py.test.raises(IOError, pool.dispatch, f) - finally: - pool.shutdown() - -def test_join_timeout(): - pool = WorkerPool() - q = queue.Queue() - def f(): - q.get() - reply = pool.dispatch(f) - pool.shutdown() - py.test.raises(IOError, pool.join, 0.01) - q.put(None) - reply.get(timeout=1.0) - pool.join(timeout=0.1) - -def test_pool_clean_shutdown(): - capture = py.io.StdCaptureFD() - pool = WorkerPool() - def f(): - pass - pool.dispatch(f) - pool.dispatch(f) - pool.shutdown() - pool.join(timeout=1.0) - assert not pool._alive - assert not pool._ready - out, err = capture.reset() - print(out) - sys.stderr.write(err + "\n") - assert err == '' From commits-noreply at bitbucket.org Mon Oct 5 01:45:15 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 4 Oct 2009 23:45:15 +0000 (UTC) Subject: [py-svn] py-trunk commit de4155a6b4e0: rewrote the initpkg mechanism and moved py lib implementation files to Message-ID: <20091004234515.AAAF29A7A1@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1254527259 -7200 # Node ID de4155a6b4e06fcbb18513bfe2f06db48c4856c5 # Parent 34ced857eecbd95944541ef98f9fdaf3f457dd35 rewrote the initpkg mechanism and moved py lib implementation files to _py/... with py/__init__.py containing pointers into them The new apipkg is only around 70 lines of code and allows us to get rid of the infamous "py.__." by regular non-magical "_py." imports. It is also available as a separately installable package, see http://bitbucket.org/hpk42/apipkg --- /dev/null +++ b/_py/io/terminalwriter.py @@ -0,0 +1,264 @@ +""" + +Helper functions for writing to terminals and files. + +""" + + +import sys, os +import py + +def _getdimensions(): + import termios,fcntl,struct + call = fcntl.ioctl(0,termios.TIOCGWINSZ,"\000"*8) + height,width = struct.unpack( "hhhh", call ) [:2] + return height, width + +if sys.platform == 'win32': + # ctypes access to the Windows console + + STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 + FOREGROUND_BLUE = 0x0001 # text color contains blue. + FOREGROUND_GREEN = 0x0002 # text color contains green. + FOREGROUND_RED = 0x0004 # text color contains red. + FOREGROUND_WHITE = 0x0007 + FOREGROUND_INTENSITY = 0x0008 # text color is intensified. + BACKGROUND_BLUE = 0x0010 # background color contains blue. + BACKGROUND_GREEN = 0x0020 # background color contains green. + BACKGROUND_RED = 0x0040 # background color contains red. + BACKGROUND_WHITE = 0x0070 + BACKGROUND_INTENSITY = 0x0080 # background color is intensified. 
+ + def GetStdHandle(kind): + import ctypes + return ctypes.windll.kernel32.GetStdHandle(kind) + + def SetConsoleTextAttribute(handle, attr): + import ctypes + ctypes.windll.kernel32.SetConsoleTextAttribute( + handle, attr) + + def _getdimensions(): + import ctypes + from ctypes import wintypes + + SHORT = ctypes.c_short + class COORD(ctypes.Structure): + _fields_ = [('X', SHORT), + ('Y', SHORT)] + class SMALL_RECT(ctypes.Structure): + _fields_ = [('Left', SHORT), + ('Top', SHORT), + ('Right', SHORT), + ('Bottom', SHORT)] + class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): + _fields_ = [('dwSize', COORD), + ('dwCursorPosition', COORD), + ('wAttributes', wintypes.WORD), + ('srWindow', SMALL_RECT), + ('dwMaximumWindowSize', COORD)] + STD_OUTPUT_HANDLE = -11 + handle = GetStdHandle(STD_OUTPUT_HANDLE) + info = CONSOLE_SCREEN_BUFFER_INFO() + ctypes.windll.kernel32.GetConsoleScreenBufferInfo( + handle, ctypes.byref(info)) + # Substract one from the width, otherwise the cursor wraps + # and the ending \n causes an empty line to display. + return info.dwSize.Y, info.dwSize.X - 1 + +def get_terminal_width(): + try: + height, width = _getdimensions() + except (SystemExit, KeyboardInterrupt): + raise + except: + # FALLBACK + width = int(os.environ.get('COLUMNS', 80))-1 + # XXX the windows getdimensions may be bogus, let's sanify a bit + width = max(width, 40) # we alaways need 40 chars + return width + +terminal_width = get_terminal_width() + +# XXX unify with _escaped func below +def ansi_print(text, esc, file=None, newline=True, flush=False): + if file is None: + file = sys.stderr + text = text.rstrip() + if esc and not isinstance(esc, tuple): + esc = (esc,) + if esc and sys.platform != "win32" and file.isatty(): + text = (''.join(['\x1b[%sm' % cod for cod in esc]) + + text + + '\x1b[0m') # ANSI color code "reset" + if newline: + text += '\n' + + if esc and sys.platform == "win32" and file.isatty(): + if 1 in esc: + bold = True + esc = tuple([x for x in esc if x != 1]) + else: + bold = False + esctable = {() : FOREGROUND_WHITE, # normal + (31,): FOREGROUND_RED, # red + (32,): FOREGROUND_GREEN, # green + (33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow + (34,): FOREGROUND_BLUE, # blue + (35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple + (36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan + (37,): FOREGROUND_WHITE, # white + (39,): FOREGROUND_WHITE, # reset + } + attr = esctable.get(esc, FOREGROUND_WHITE) + if bold: + attr |= FOREGROUND_INTENSITY + STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 + if file is sys.stderr: + handle = GetStdHandle(STD_ERROR_HANDLE) + else: + handle = GetStdHandle(STD_OUTPUT_HANDLE) + SetConsoleTextAttribute(handle, attr) + file.write(text) + SetConsoleTextAttribute(handle, FOREGROUND_WHITE) + else: + file.write(text) + + if flush: + file.flush() + +def should_do_markup(file): + return hasattr(file, 'isatty') and file.isatty() \ + and os.environ.get('TERM') != 'dumb' + +class TerminalWriter(object): + _esctable = dict(black=30, red=31, green=32, yellow=33, + blue=34, purple=35, cyan=36, white=37, + Black=40, Red=41, Green=42, Yellow=43, + Blue=44, Purple=45, Cyan=46, White=47, + bold=1, light=2, blink=5, invert=7) + + # XXX deprecate stringio argument + def __init__(self, file=None, stringio=False, encoding=None): + self.encoding = encoding + + if file is None: + if stringio: + self.stringio = file = py.io.TextIO() + else: + file = py.std.sys.stdout + elif hasattr(file, '__call__'): + file = WriteFile(file, encoding=encoding) + self._file = file + self.fullwidth = 
get_terminal_width() + self.hasmarkup = should_do_markup(file) + + def _escaped(self, text, esc): + if esc and self.hasmarkup: + text = (''.join(['\x1b[%sm' % cod for cod in esc]) + + text +'\x1b[0m') + return text + + def markup(self, text, **kw): + esc = [] + for name in kw: + if name not in self._esctable: + raise ValueError("unknown markup: %r" %(name,)) + if kw[name]: + esc.append(self._esctable[name]) + return self._escaped(text, tuple(esc)) + + def sep(self, sepchar, title=None, fullwidth=None, **kw): + if fullwidth is None: + fullwidth = self.fullwidth + # the goal is to have the line be as long as possible + # under the condition that len(line) <= fullwidth + if title is not None: + # we want 2 + 2*len(fill) + len(title) <= fullwidth + # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth + # 2*len(sepchar)*N <= fullwidth - len(title) - 2 + # N <= (fullwidth - len(title) - 2) // (2*len(sepchar)) + N = (fullwidth - len(title) - 2) // (2*len(sepchar)) + fill = sepchar * N + line = "%s %s %s" % (fill, title, fill) + else: + # we want len(sepchar)*N <= fullwidth + # i.e. N <= fullwidth // len(sepchar) + line = sepchar * (fullwidth // len(sepchar)) + # in some situations there is room for an extra sepchar at the right, + # in particular if we consider that with a sepchar like "_ " the + # trailing space is not important at the end of the line + if len(line) + len(sepchar.rstrip()) <= fullwidth: + line += sepchar.rstrip() + + self.line(line, **kw) + + def write(self, s, **kw): + if s: + s = self._getbytestring(s) + if self.hasmarkup and kw: + s = self.markup(s, **kw) + self._file.write(s) + self._file.flush() + + def _getbytestring(self, s): + # XXX review this and the whole logic + if self.encoding and sys.version_info < (3,0) and isinstance(s, unicode): + return s.encode(self.encoding) + elif not isinstance(s, str): + return str(s) + return s + + def line(self, s='', **kw): + self.write(s, **kw) + self.write('\n') + +class Win32ConsoleWriter(TerminalWriter): + def write(self, s, **kw): + if s: + s = self._getbytestring(s) + if self.hasmarkup: + handle = GetStdHandle(STD_OUTPUT_HANDLE) + + if self.hasmarkup and kw: + attr = 0 + if kw.pop('bold', False): + attr |= FOREGROUND_INTENSITY + + if kw.pop('red', False): + attr |= FOREGROUND_RED + elif kw.pop('blue', False): + attr |= FOREGROUND_BLUE + elif kw.pop('green', False): + attr |= FOREGROUND_GREEN + else: + attr |= FOREGROUND_WHITE + + SetConsoleTextAttribute(handle, attr) + self._file.write(s) + self._file.flush() + if self.hasmarkup: + SetConsoleTextAttribute(handle, FOREGROUND_WHITE) + + def line(self, s="", **kw): + self.write(s+"\n", **kw) + +if sys.platform == 'win32': + TerminalWriter = Win32ConsoleWriter + +class WriteFile(object): + def __init__(self, writemethod, encoding=None): + self.encoding = encoding + self._writemethod = writemethod + + def write(self, data): + if self.encoding: + data = data.encode(self.encoding) + self._writemethod(data) + + def flush(self): + return + + --- /dev/null +++ b/_py/log/log.py @@ -0,0 +1,184 @@ +""" +basic logging functionality based on a producer/consumer scheme. + +XXX implement this API: (maybe put it into slogger.py?) 
+ + log = Logger( + info=py.log.STDOUT, + debug=py.log.STDOUT, + command=None) + log.info("hello", "world") + log.command("hello", "world") + + log = Logger(info=Logger(something=...), + debug=py.log.STDOUT, + command=None) +""" +import py, sys + +class Message(object): + def __init__(self, keywords, args): + self.keywords = keywords + self.args = args + + def content(self): + return " ".join(map(str, self.args)) + + def prefix(self): + return "[%s] " % (":".join(self.keywords)) + + def __str__(self): + return self.prefix() + self.content() + + +class Producer(object): + """ (deprecated) Log producer API which sends messages to be logged + to a 'consumer' object, which then prints them to stdout, + stderr, files, etc. Used extensively by PyPy-1.1. + """ + + Message = Message # to allow later customization + keywords2consumer = {} + + def __init__(self, keywords, keywordmapper=None, **kw): + if hasattr(keywords, 'split'): + keywords = tuple(keywords.split()) + self._keywords = keywords + if keywordmapper is None: + keywordmapper = default_keywordmapper + self._keywordmapper = keywordmapper + + def __repr__(self): + return "" % ":".join(self._keywords) + + def __getattr__(self, name): + if '_' in name: + raise AttributeError(name) + producer = self.__class__(self._keywords + (name,)) + setattr(self, name, producer) + return producer + + def __call__(self, *args): + """ write a message to the appropriate consumer(s) """ + func = self._keywordmapper.getconsumer(self._keywords) + if func is not None: + func(self.Message(self._keywords, args)) + +class KeywordMapper: + def __init__(self): + self.keywords2consumer = {} + + def getstate(self): + return self.keywords2consumer.copy() + def setstate(self, state): + self.keywords2consumer.clear() + self.keywords2consumer.update(state) + + def getconsumer(self, keywords): + """ return a consumer matching the given keywords. + + tries to find the most suitable consumer by walking, starting from + the back, the list of keywords, the first consumer matching a + keyword is returned (falling back to py.log.default) + """ + for i in range(len(keywords), 0, -1): + try: + return self.keywords2consumer[keywords[:i]] + except KeyError: + continue + return self.keywords2consumer.get('default', default_consumer) + + def setconsumer(self, keywords, consumer): + """ set a consumer for a set of keywords. 
""" + # normalize to tuples + if isinstance(keywords, str): + keywords = tuple(filter(None, keywords.split())) + elif hasattr(keywords, '_keywords'): + keywords = keywords._keywords + elif not isinstance(keywords, tuple): + raise TypeError("key %r is not a string or tuple" % (keywords,)) + if consumer is not None and not py.builtin.callable(consumer): + if not hasattr(consumer, 'write'): + raise TypeError( + "%r should be None, callable or file-like" % (consumer,)) + consumer = File(consumer) + self.keywords2consumer[keywords] = consumer + +def default_consumer(msg): + """ the default consumer, prints the message to stdout (using 'print') """ + sys.stderr.write(str(msg)+"\n") + +default_keywordmapper = KeywordMapper() + +def setconsumer(keywords, consumer): + default_keywordmapper.setconsumer(keywords, consumer) + +def setstate(state): + default_keywordmapper.setstate(state) +def getstate(): + return default_keywordmapper.getstate() + +# +# Consumers +# + +class File(object): + """ log consumer wrapping a file(-like) object """ + def __init__(self, f): + assert hasattr(f, 'write') + #assert isinstance(f, file) or not hasattr(f, 'open') + self._file = f + + def __call__(self, msg): + """ write a message to the log """ + self._file.write(str(msg) + "\n") + +class Path(object): + """ log consumer that opens and writes to a Path """ + def __init__(self, filename, append=False, + delayed_create=False, buffering=False): + self._append = append + self._filename = str(filename) + self._buffering = buffering + if not delayed_create: + self._openfile() + + def _openfile(self): + mode = self._append and 'a' or 'w' + f = open(self._filename, mode) + self._file = f + + def __call__(self, msg): + """ write a message to the log """ + if not hasattr(self, "_file"): + self._openfile() + self._file.write(str(msg) + "\n") + if not self._buffering: + self._file.flush() + +def STDOUT(msg): + """ consumer that writes to sys.stdout """ + sys.stdout.write(str(msg)+"\n") + +def STDERR(msg): + """ consumer that writes to sys.stderr """ + sys.stderr.write(str(msg)+"\n") + +class Syslog: + """ consumer that writes to the syslog daemon """ + + def __init__(self, priority = None): + if priority is None: + priority = self.LOG_INFO + self.priority = priority + + def __call__(self, msg): + """ write a message to the log """ + py.std.syslog.syslog(self.priority, str(msg)) + +for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split(): + _prio = "LOG_" + _prio + try: + setattr(Syslog, _prio, getattr(py.std.syslog, _prio)) + except AttributeError: + pass --- /dev/null +++ b/_py/code/source.py @@ -0,0 +1,347 @@ +from __future__ import generators +import sys +import inspect, tokenize +import py +cpy_compile = compile + +try: + import _ast + from _ast import PyCF_ONLY_AST as _AST_FLAG +except ImportError: + _AST_FLAG = 0 + _ast = None + + +class Source(object): + """ a immutable object holding a source code fragment, + possibly deindenting it. 
+ """ + def __init__(self, *parts, **kwargs): + self.lines = lines = [] + de = kwargs.get('deindent', True) + rstrip = kwargs.get('rstrip', True) + for part in parts: + if not part: + partlines = [] + if isinstance(part, Source): + partlines = part.lines + elif isinstance(part, py.builtin._basestring): + partlines = part.split('\n') + if rstrip: + while partlines: + if partlines[-1].strip(): + break + partlines.pop() + else: + partlines = getsource(part, deindent=de).lines + if de: + partlines = deindent(partlines) + lines.extend(partlines) + + def __eq__(self, other): + try: + return self.lines == other.lines + except AttributeError: + if isinstance(other, str): + return str(self) == other + return False + + def __getitem__(self, key): + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + return self.__getslice__(key.start, key.stop) + + def __len__(self): + return len(self.lines) + + def __getslice__(self, start, end): + newsource = Source() + newsource.lines = self.lines[start:end] + return newsource + + def strip(self): + """ return new source object with trailing + and leading blank lines removed. + """ + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end-1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def putaround(self, before='', after='', indent=' ' * 4): + """ return a copy of the source object with + 'before' and 'after' wrapped around it. + """ + before = Source(before) + after = Source(after) + newsource = Source() + lines = [ (indent + line) for line in self.lines] + newsource.lines = before.lines + lines + after.lines + return newsource + + def indent(self, indent=' ' * 4): + """ return a copy of the source object with + all lines indented by the given indent-string. + """ + newsource = Source() + newsource.lines = [(indent+line) for line in self.lines] + return newsource + + def getstatement(self, lineno): + """ return Source statement which contains the + given linenumber (counted from 0). + """ + start, end = self.getstatementrange(lineno) + return self[start:end] + + def getstatementrange(self, lineno): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. + """ + # XXX there must be a better than these heuristic ways ... + # XXX there may even be better heuristics :-) + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + + # 1. find the start of the statement + from codeop import compile_command + for start in range(lineno, -1, -1): + trylines = self.lines[start:lineno+1] + # quick hack to indent the source and get it as a string in one go + trylines.insert(0, 'def xxx():') + trysource = '\n '.join(trylines) + # ^ space here + try: + compile_command(trysource) + except (SyntaxError, OverflowError, ValueError): + pass + else: + break # got a valid or incomplete statement + + # 2. find the end of the statement + for end in range(lineno+1, len(self)+1): + trysource = self[start:end] + if trysource.isparseable(): + break + + return start, end + + def getblockend(self, lineno): + # XXX + lines = [x + '\n' for x in self.lines[lineno:]] + blocklines = inspect.getblock(lines) + #print blocklines + return lineno + len(blocklines) - 1 + + def deindent(self, offset=None): + """ return a new source object deindented by offset. 
+ If offset is None then guess an indentation offset from + the first non-blank line. Subsequent lines which have a + lower indentation offset will be copied verbatim as + they are assumed to be part of multilines. + """ + # XXX maybe use the tokenizer to properly handle multiline + # strings etc.pp? + newsource = Source() + newsource.lines[:] = deindent(self.lines, offset) + return newsource + + def isparseable(self, deindent=True): + """ return True if source is parseable, heuristically + deindenting it by default. + """ + try: + import parser + except ImportError: + syntax_checker = lambda x: compile(x, 'asd', 'exec') + else: + syntax_checker = parser.suite + + if deindent: + source = str(self.deindent()) + else: + source = str(self) + try: + #compile(source+'\n', "x", "exec") + syntax_checker(source+'\n') + except SyntaxError: + return False + else: + return True + + def __str__(self): + return "\n".join(self.lines) + + def compile(self, filename=None, mode='exec', + flag=generators.compiler_flag, + dont_inherit=0, _genframe=None): + """ return compiled code object. if filename is None + invent an artificial filename which displays + the source/line position of the caller frame. + """ + if not filename or py.path.local(filename).check(file=0): + if _genframe is None: + _genframe = sys._getframe(1) # the caller + fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno + if not filename: + filename = '' % (fn, lineno) + else: + filename = '' % (filename, fn, lineno) + source = "\n".join(self.lines) + '\n' + try: + co = cpy_compile(source, filename, mode, flag) + except SyntaxError: + ex = sys.exc_info()[1] + # re-represent syntax errors from parsing python strings + msglines = self.lines[:ex.lineno] + if ex.offset: + msglines.append(" "*ex.offset + '^') + msglines.append("syntax error probably generated here: %s" % filename) + newex = SyntaxError('\n'.join(msglines)) + newex.offset = ex.offset + newex.lineno = ex.lineno + newex.text = ex.text + raise newex + else: + if flag & _AST_FLAG: + return co + co_filename = MyStr(filename) + co_filename.__source__ = self + return py.code.Code(co).new(rec=1, co_filename=co_filename) + #return newcode_withfilename(co, co_filename) + +# +# public API shortcut functions +# + +def compile_(source, filename=None, mode='exec', flags= + generators.compiler_flag, dont_inherit=0): + """ compile the given source to a raw code object, + which points back to the source code through + "co_filename.__source__". All code objects + contained in the code object will recursively + also have this special subclass-of-string + filename. + """ + if _ast is not None and isinstance(source, _ast.AST): + # XXX should Source support having AST? + return cpy_compile(source, filename, mode, flags, dont_inherit) + _genframe = sys._getframe(1) # the caller + s = Source(source) + co = s.compile(filename, mode, flags, _genframe=_genframe) + return co + + +def getfslineno(obj): + try: + code = py.code.Code(obj) + except TypeError: + # fallback to + fn = (py.std.inspect.getsourcefile(obj) or + py.std.inspect.getfile(obj)) + fspath = fn and py.path.local(fn) or None + if fspath: + try: + _, lineno = findsource(obj) + except IOError: + lineno = None + else: + lineno = None + else: + fspath = code.path + lineno = code.firstlineno + return fspath, lineno + +# +# helper functions +# +class MyStr(str): + """ custom string which allows to add attributes. 
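A sketch of the compile support above (py.code.Source assumed as the public name); the compiled code object records the caller's file and line in its co_filename:

    import py

    s = py.code.Source("x = 6 * 7")
    assert s.isparseable()
    co = s.compile()       # or the module-level py.code.compile(...) helper
    ns = {}
    exec(co, ns)
    assert ns["x"] == 42
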
""" + +def findsource(obj): + obj = py.code.getrawcode(obj) + try: + fullsource = obj.co_filename.__source__ + except AttributeError: + try: + sourcelines, lineno = py.std.inspect.findsource(obj) + except (KeyboardInterrupt, SystemExit): + raise + except: + return None, None + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + else: + lineno = obj.co_firstlineno - 1 + return fullsource, lineno + + +def getsource(obj, **kwargs): + obj = py.code.getrawcode(obj) + try: + fullsource = obj.co_filename.__source__ + except AttributeError: + try: + strsrc = inspect.getsource(obj) + except IndentationError: + strsrc = "\"Buggy python version consider upgrading, cannot get source\"" + assert isinstance(strsrc, str) + return Source(strsrc, **kwargs) + else: + lineno = obj.co_firstlineno - 1 + end = fullsource.getblockend(lineno) + return Source(fullsource[lineno:end+1], deident=True) + + +def deindent(lines, offset=None): + if offset is None: + for line in lines: + line = line.expandtabs() + s = line.lstrip() + if s: + offset = len(line)-len(s) + break + else: + offset = 0 + if offset == 0: + return list(lines) + newlines = [] + def readline_generator(lines): + for line in lines: + yield line + '\n' + while True: + yield '' + + r = readline_generator(lines) + try: + readline = r.next + except AttributeError: + readline = r.__next__ + + try: + for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(readline): + if sline > len(lines): + break # End of input reached + if sline > len(newlines): + line = lines[sline - 1].expandtabs() + if line.lstrip() and line[:offset].isspace(): + line = line[offset:] # Deindent + newlines.append(line) + + for i in range(sline, eline): + # Don't deindent continuing lines of + # multiline tokens (i.e. multiline strings) + newlines.append(lines[i]) + except (IndentationError, tokenize.TokenError): + pass + # Add any lines we didn't see. E.g. if an exception was raised. + newlines.extend(lines[len(newlines):]) + return newlines --- /dev/null +++ b/_py/path/svnurl.py @@ -0,0 +1,365 @@ +""" +module defining a subversion path object based on the external +command 'svn'. This modules aims to work with svn 1.3 and higher +but might also interact well with earlier versions. +""" + +import os, sys, time, re +import py +from py import path, process +from _py.path import common +from _py.path import svnwc as svncommon +from _py.path.cacheutil import BuildcostAccessCache, AgingCache + +DEBUG=False + +class SvnCommandPath(svncommon.SvnPathBase): + """ path implementation that offers access to (possibly remote) subversion + repositories. 
""" + + _lsrevcache = BuildcostAccessCache(maxentries=128) + _lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0) + + def __new__(cls, path, rev=None, auth=None): + self = object.__new__(cls) + if isinstance(path, cls): + rev = path.rev + auth = path.auth + path = path.strpath + svncommon.checkbadchars(path) + path = path.rstrip('/') + self.strpath = path + self.rev = rev + self.auth = auth + return self + + def __repr__(self): + if self.rev == -1: + return 'svnurl(%r)' % self.strpath + else: + return 'svnurl(%r, %r)' % (self.strpath, self.rev) + + def _svnwithrev(self, cmd, *args): + """ execute an svn command, append our own url and revision """ + if self.rev is None: + return self._svnwrite(cmd, *args) + else: + args = ['-r', self.rev] + list(args) + return self._svnwrite(cmd, *args) + + def _svnwrite(self, cmd, *args): + """ execute an svn command, append our own url """ + l = ['svn %s' % cmd] + args = ['"%s"' % self._escape(item) for item in args] + l.extend(args) + l.append('"%s"' % self._encodedurl()) + # fixing the locale because we can't otherwise parse + string = " ".join(l) + if DEBUG: + print("execing %s" % string) + out = self._svncmdexecauth(string) + return out + + def _svncmdexecauth(self, cmd): + """ execute an svn command 'as is' """ + cmd = svncommon.fixlocale() + cmd + if self.auth is not None: + cmd += ' ' + self.auth.makecmdoptions() + return self._cmdexec(cmd) + + def _cmdexec(self, cmd): + try: + out = process.cmdexec(cmd) + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + if (e.err.find('File Exists') != -1 or + e.err.find('File already exists') != -1): + raise py.error.EEXIST(self) + raise + return out + + def _svnpopenauth(self, cmd): + """ execute an svn command, return a pipe for reading stdin """ + cmd = svncommon.fixlocale() + cmd + if self.auth is not None: + cmd += ' ' + self.auth.makecmdoptions() + return self._popen(cmd) + + def _popen(self, cmd): + return os.popen(cmd) + + def _encodedurl(self): + return self._escape(self.strpath) + + def _norev_delentry(self, path): + auth = self.auth and self.auth.makecmdoptions() or None + self._lsnorevcache.delentry((str(path), auth)) + + def open(self, mode='r'): + """ return an opened file with the given mode. """ + if mode not in ("r", "rU",): + raise ValueError("mode %r not supported" % (mode,)) + assert self.check(file=1) # svn cat returns an empty file otherwise + if self.rev is None: + return self._svnpopenauth('svn cat "%s"' % ( + self._escape(self.strpath), )) + else: + return self._svnpopenauth('svn cat -r %s "%s"' % ( + self.rev, self._escape(self.strpath))) + + def dirpath(self, *args, **kwargs): + """ return the directory path of the current path joined + with any given path arguments. + """ + l = self.strpath.split(self.sep) + if len(l) < 4: + raise py.error.EINVAL(self, "base is not valid") + elif len(l) == 4: + return self.join(*args, **kwargs) + else: + return self.new(basename='').join(*args, **kwargs) + + # modifying methods (cache must be invalidated) + def mkdir(self, *args, **kwargs): + """ create & return the directory joined with args. + pass a 'msg' keyword argument to set the commit message. 
+ """ + commit_msg = kwargs.get('msg', "mkdir by py lib invocation") + createpath = self.join(*args) + createpath._svnwrite('mkdir', '-m', commit_msg) + self._norev_delentry(createpath.dirpath()) + return createpath + + def copy(self, target, msg='copied by py lib invocation'): + """ copy path to target with checkin message msg.""" + if getattr(target, 'rev', None) is not None: + raise py.error.EINVAL(target, "revisions are immutable") + self._svncmdexecauth('svn copy -m "%s" "%s" "%s"' %(msg, + self._escape(self), self._escape(target))) + self._norev_delentry(target.dirpath()) + + def rename(self, target, msg="renamed by py lib invocation"): + """ rename this path to target with checkin message msg. """ + if getattr(self, 'rev', None) is not None: + raise py.error.EINVAL(self, "revisions are immutable") + self._svncmdexecauth('svn move -m "%s" --force "%s" "%s"' %( + msg, self._escape(self), self._escape(target))) + self._norev_delentry(self.dirpath()) + self._norev_delentry(self) + + def remove(self, rec=1, msg='removed by py lib invocation'): + """ remove a file or directory (or a directory tree if rec=1) with +checkin message msg.""" + if self.rev is not None: + raise py.error.EINVAL(self, "revisions are immutable") + self._svncmdexecauth('svn rm -m "%s" "%s"' %(msg, self._escape(self))) + self._norev_delentry(self.dirpath()) + + def export(self, topath): + """ export to a local path + + topath should not exist prior to calling this, returns a + py.path.local instance + """ + topath = py.path.local(topath) + args = ['"%s"' % (self._escape(self),), + '"%s"' % (self._escape(topath),)] + if self.rev is not None: + args = ['-r', str(self.rev)] + args + self._svncmdexecauth('svn export %s' % (' '.join(args),)) + return topath + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). If you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. 
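The modifying methods above commit straight to the repository and accept a commit message; a hedged sketch with a hypothetical URL:

    import py

    trunk = py.path.svnurl("http://example.org/svn/project/trunk")   # hypothetical
    sandbox = trunk.mkdir("sandbox", msg="create sandbox dir")
    sandbox.copy(trunk.join("sandbox-copy"), msg="copy it")
    sandbox.remove(msg="and remove the original again")
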
+ """ + if getattr(self, 'rev', None) is not None: + raise py.error.EINVAL(self, "revisions are immutable") + target = self.join(*args) + dir = kwargs.get('dir', 0) + for x in target.parts(reverse=True): + if x.check(): + break + else: + raise py.error.ENOENT(target, "has not any valid base!") + if x == target: + if not x.check(dir=dir): + raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x) + return x + tocreate = target.relto(x) + basename = tocreate.split(self.sep, 1)[0] + tempdir = py.path.local.mkdtemp() + try: + tempdir.ensure(tocreate, dir=dir) + cmd = 'svn import -m "%s" "%s" "%s"' % ( + "ensure %s" % self._escape(tocreate), + self._escape(tempdir.join(basename)), + x.join(basename)._encodedurl()) + self._svncmdexecauth(cmd) + self._norev_delentry(x) + finally: + tempdir.remove() + return target + + # end of modifying methods + def _propget(self, name): + res = self._svnwithrev('propget', name) + return res[:-1] # strip trailing newline + + def _proplist(self): + res = self._svnwithrev('proplist') + lines = res.split('\n') + lines = [x.strip() for x in lines[1:]] + return svncommon.PropListDict(self, lines) + + def _listdir_nameinfo(self): + """ return sequence of name-info directory entries of self """ + def builder(): + try: + res = self._svnwithrev('ls', '-v') + except process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('non-existent in that revision') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('File not found') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('not part of a repository')!=-1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('Unable to open')!=-1: + raise py.error.ENOENT(self, e.err) + elif e.err.lower().find('method not allowed')!=-1: + raise py.error.EACCES(self, e.err) + raise py.error.Error(e.err) + lines = res.split('\n') + nameinfo_seq = [] + for lsline in lines: + if lsline: + info = InfoSvnCommand(lsline) + if info._name != '.': # svn 1.5 produces '.' dirs, + nameinfo_seq.append((info._name, info)) + nameinfo_seq.sort() + return nameinfo_seq + auth = self.auth and self.auth.makecmdoptions() or None + if self.rev is not None: + return self._lsrevcache.getorbuild((self.strpath, self.rev, auth), + builder) + else: + return self._lsnorevcache.getorbuild((self.strpath, auth), + builder) + + def listdir(self, fil=None, sort=None): + """ list directory contents, possibly filter by the given fil func + and possibly sorted. + """ + if isinstance(fil, str): + fil = common.FNMatcher(fil) + nameinfo_seq = self._listdir_nameinfo() + if len(nameinfo_seq) == 1: + name, info = nameinfo_seq[0] + if name == self.basename and info.kind == 'file': + #if not self.check(dir=1): + raise py.error.ENOTDIR(self) + paths = [self.join(name) for (name, info) in nameinfo_seq] + if fil: + paths = [x for x in paths if fil(x)] + self._sortlist(paths, sort) + return paths + + + def log(self, rev_start=None, rev_end=1, verbose=False): + """ return a list of LogEntry instances for this path. +rev_start is the starting revision (defaulting to the first one). +rev_end is the last revision (defaulting to HEAD). +if verbose is True, then the LogEntry instances also know which files changed. 
+""" + assert self.check() #make it simpler for the pipe + rev_start = rev_start is None and "HEAD" or rev_start + rev_end = rev_end is None and "HEAD" or rev_end + + if rev_start == "HEAD" and rev_end == 1: + rev_opt = "" + else: + rev_opt = "-r %s:%s" % (rev_start, rev_end) + verbose_opt = verbose and "-v" or "" + xmlpipe = self._svnpopenauth('svn log --xml %s %s "%s"' % + (rev_opt, verbose_opt, self.strpath)) + from xml.dom import minidom + tree = minidom.parse(xmlpipe) + result = [] + for logentry in filter(None, tree.firstChild.childNodes): + if logentry.nodeType == logentry.ELEMENT_NODE: + result.append(svncommon.LogEntry(logentry)) + return result + +#01234567890123456789012345678901234567890123467 +# 2256 hpk 165 Nov 24 17:55 __init__.py +# XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!! +# 1312 johnny 1627 May 05 14:32 test_decorators.py +# +class InfoSvnCommand: + # the '0?' part in the middle is an indication of whether the resource is + # locked, see 'svn help ls' + lspattern = re.compile( + r'^ *(?P\d+) +(?P.+?) +(0? *(?P\d+))? ' + '*(?P\w+ +\d{2} +[\d:]+) +(?P.*)$') + def __init__(self, line): + # this is a typical line from 'svn ls http://...' + #_ 1127 jum 0 Jul 13 15:28 branch/ + match = self.lspattern.match(line) + data = match.groupdict() + self._name = data['file'] + if self._name[-1] == '/': + self._name = self._name[:-1] + self.kind = 'dir' + else: + self.kind = 'file' + #self.has_props = l.pop(0) == 'P' + self.created_rev = int(data['rev']) + self.last_author = data['author'] + self.size = data['size'] and int(data['size']) or 0 + self.mtime = parse_time_with_missing_year(data['date']) + self.time = self.mtime * 1000000 + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + +#____________________________________________________ +# +# helper functions +#____________________________________________________ +def parse_time_with_missing_year(timestr): + """ analyze the time part from a single line of "svn ls -v" + the svn output doesn't show the year makes the 'timestr' + ambigous. + """ + import calendar + t_now = time.gmtime() + + tparts = timestr.split() + month = time.strptime(tparts.pop(0), '%b')[1] + day = time.strptime(tparts.pop(0), '%d')[2] + last = tparts.pop(0) # year or hour:minute + try: + year = time.strptime(last, '%Y')[0] + hour = minute = 0 + except ValueError: + hour, minute = time.strptime(last, '%H:%M')[3:5] + year = t_now[0] + + t_result = (year, month, day, hour, minute, 0,0,0,0) + if t_result > t_now: + year -= 1 + t_result = (year, month, day, hour, minute, 0,0,0,0) + return calendar.timegm(t_result) + +class PathEntry: + def __init__(self, ppart): + self.strpath = ppart.firstChild.nodeValue.encode('UTF-8') + self.action = ppart.getAttribute('action').encode('UTF-8') + if self.action == 'A': + self.copyfrom_path = ppart.getAttribute('copyfrom-path').encode('UTF-8') + if self.copyfrom_path: + self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev')) + --- /dev/null +++ b/_py/cmdline/pyrest.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python +""" +invoke + + py.rest filename1.txt directory + +to generate html files from ReST. + +It is also possible to generate pdf files using the --topdf option. 
+ +http://docutils.sourceforge.net/docs/user/rst/quickref.html + +""" + +import os, sys +import py + +if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()): + def log(msg): + print(msg) +else: + def log(msg): + pass + + +parser = py.std.optparse.OptionParser(usage=__doc__) +parser.add_option("--topdf", action="store_true", dest="topdf", default=False, + help="generate pdf files") +parser.add_option("--stylesheet", dest="stylesheet", default=None, + help="use specified latex style sheet") +parser.add_option("--debug", action="store_true", dest="debug", + default=False, + help="print debug output and don't delete files") + + +def main(): + try: + from _py.rest import directive, resthtml + from _py.rest.latex import process_rest_file, process_configfile + except ImportError: + e = sys.exc_info()[1] + print(str(e)) + sys.exit(1) + + (options, args) = parser.parse_args() + + if len(args) == 0: + filenames = [py.path.svnwc()] + else: + filenames = [py.path.svnwc(x) for x in args] + + if options.topdf: + directive.set_backend_and_register_directives("latex") + + for p in filenames: + if not p.check(): + log("path %s not found, ignoring" % p) + continue + def fil(p): + return p.check(fnmatch='*.txt', versioned=True) + def rec(p): + return p.check(dotfile=0) + if p.check(dir=1): + for x in p.visit(fil, rec): + resthtml.process(x) + elif p.check(file=1): + if p.ext == ".rst2pdfconfig": + directive.set_backend_and_register_directives("latex") + process_configfile(p, options.debug) + else: + if options.topdf: + cfg = p.new(ext=".rst2pdfconfig") + if cfg.check(): + print("using config file %s" % (cfg, )) + process_configfile(cfg, options.debug) + else: + process_rest_file(p.localpath, + options.stylesheet, + options.debug) + else: + resthtml.process(p) + --- /dev/null +++ b/_py/error.py @@ -0,0 +1,83 @@ +""" +create errno-specific classes for IO or os calls. + +""" +import sys, os, errno + +class Error(EnvironmentError): + def __repr__(self): + return "%s.%s %r: %s " %(self.__class__.__module__, + self.__class__.__name__, + self.__class__.__doc__, + " ".join(map(str, self.args)), + #repr(self.args) + ) + + def __str__(self): + s = "[%s]: %s" %(self.__class__.__doc__, + " ".join(map(str, self.args)), + ) + return s + +_winerrnomap = { + 2: errno.ENOENT, + 3: errno.ENOENT, + 17: errno.EEXIST, + 22: errno.ENOTDIR, + 267: errno.ENOTDIR, + 5: errno.EACCES, # anything better? +} + +class ErrorMaker(object): + """ lazily provides Exception classes for each possible POSIX errno + (as defined per the 'errno' module). All such instances + subclass EnvironmentError. + """ + Error = Error + _errno2class = {} + + def __getattr__(self, name): + eno = getattr(errno, name) + cls = self._geterrnoclass(eno) + setattr(self, name, cls) + return cls + + def _geterrnoclass(self, eno): + try: + return self._errno2class[eno] + except KeyError: + clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,)) + errorcls = type(Error)(clsname, (Error,), + {'__module__':'py.error', + '__doc__': os.strerror(eno)}) + self._errno2class[eno] = errorcls + return errorcls + + def checked_call(self, func, *args): + """ call a function and raise an errno-exception if applicable. 
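The ErrorMaker above generates one Error subclass per errno name on first attribute access; a small sketch, assuming the instance is exposed as py.error:

    import py

    assert issubclass(py.error.ENOENT, py.error.Error)   # created lazily
    assert issubclass(py.error.Error, EnvironmentError)
    try:
        raise py.error.EACCES("some-path")                # hypothetical argument
    except EnvironmentError:
        pass
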
""" + __tracebackhide__ = True + try: + return func(*args) + except self.Error: + raise + except EnvironmentError: + cls, value, tb = sys.exc_info() + if not hasattr(value, 'errno'): + raise + __tracebackhide__ = False + errno = value.errno + try: + if not isinstance(value, WindowsError): + raise NameError + except NameError: + # we are not on Windows, or we got a proper OSError + cls = self._geterrnoclass(errno) + else: + try: + cls = self._geterrnoclass(_winerrnomap[errno]) + except KeyError: + raise value + raise cls("%s%r" % (func.__name__, args)) + __tracebackhide__ = True + +error = ErrorMaker() --- /dev/null +++ b/_py/path/gateway/remotepath.py @@ -0,0 +1,47 @@ +import py, itertools +from _py.path import common + +COUNTER = itertools.count() + +class RemotePath(common.PathBase): + sep = '/' + + def __init__(self, channel, id, basename=None): + self._channel = channel + self._id = id + self._basename = basename + self._specs = {} + + def __del__(self): + self._channel.send(('DEL', self._id)) + + def __repr__(self): + return 'RemotePath(%s)' % self.basename + + def listdir(self, *args): + self._channel.send(('LIST', self._id) + args) + return [RemotePath(self._channel, id, basename) + for (id, basename) in self._channel.receive()] + + def dirpath(self): + id = ~COUNTER.next() + self._channel.send(('DIRPATH', self._id, id)) + return RemotePath(self._channel, id) + + def join(self, *args): + id = ~COUNTER.next() + self._channel.send(('JOIN', self._id, id) + args) + return RemotePath(self._channel, id) + + def _getbyspec(self, spec): + parts = spec.split(',') + ask = [x for x in parts if x not in self._specs] + if ask: + self._channel.send(('GET', self._id, ",".join(ask))) + for part, value in zip(ask, self._channel.receive()): + self._specs[part] = value + return [self._specs[x] for x in parts] + + def read(self): + self._channel.send(('READ', self._id)) + return self._channel.receive() --- /dev/null +++ b/_py/builtin/builtin25.py @@ -0,0 +1,15 @@ + +try: + BaseException = BaseException +except NameError: + BaseException = Exception + +try: + GeneratorExit = GeneratorExit +except NameError: + class GeneratorExit(Exception): + """ This exception is never raised, it is there to make it possible to + write code compatible with CPython 2.5 even in lower CPython + versions.""" + pass + GeneratorExit.__module__ = 'exceptions' --- /dev/null +++ b/_py/process/forkedfunc.py @@ -0,0 +1,108 @@ + +""" + ForkedFunc provides a way to run a function in a forked process + and get at its return value, stdout and stderr output as well + as signals and exitstatusus. 
+ + XXX see if tempdir handling is sane +""" + +import py +import os +import sys +import marshal + +class ForkedFunc(object): + EXITSTATUS_EXCEPTION = 3 + def __init__(self, fun, args=None, kwargs=None, nice_level=0): + if args is None: + args = [] + if kwargs is None: + kwargs = {} + self.fun = fun + self.args = args + self.kwargs = kwargs + self.tempdir = tempdir = py.path.local.mkdtemp() + self.RETVAL = tempdir.ensure('retval') + self.STDOUT = tempdir.ensure('stdout') + self.STDERR = tempdir.ensure('stderr') + + pid = os.fork() + if pid: # in parent process + self.pid = pid + else: # in child process + self._child(nice_level) + + def _child(self, nice_level): + # right now we need to call a function, but first we need to + # map all IO that might happen + # make sure sys.stdout points to file descriptor one + sys.stdout = stdout = self.STDOUT.open('w') + sys.stdout.flush() + fdstdout = stdout.fileno() + if fdstdout != 1: + os.dup2(fdstdout, 1) + sys.stderr = stderr = self.STDERR.open('w') + fdstderr = stderr.fileno() + if fdstderr != 2: + os.dup2(fdstderr, 2) + retvalf = self.RETVAL.open("wb") + EXITSTATUS = 0 + try: + if nice_level: + os.nice(nice_level) + try: + retval = self.fun(*self.args, **self.kwargs) + retvalf.write(marshal.dumps(retval)) + except: + excinfo = py.code.ExceptionInfo() + stderr.write(excinfo.exconly()) + EXITSTATUS = self.EXITSTATUS_EXCEPTION + finally: + stdout.close() + stderr.close() + retvalf.close() + os.close(1) + os.close(2) + os._exit(EXITSTATUS) + + def waitfinish(self, waiter=os.waitpid): + pid, systemstatus = waiter(self.pid, 0) + if systemstatus: + if os.WIFSIGNALED(systemstatus): + exitstatus = os.WTERMSIG(systemstatus) + 128 + else: + exitstatus = os.WEXITSTATUS(systemstatus) + #raise ExecutionFailed(status, systemstatus, cmd, + # ''.join(out), ''.join(err)) + else: + exitstatus = 0 + signal = systemstatus & 0x7f + if not exitstatus and not signal: + retval = self.RETVAL.open('rb') + try: + retval_data = retval.read() + finally: + retval.close() + retval = marshal.loads(retval_data) + else: + retval = None + stdout = self.STDOUT.read() + stderr = self.STDERR.read() + self._removetemp() + return Result(exitstatus, signal, retval, stdout, stderr) + + def _removetemp(self): + if self.tempdir.check(): + self.tempdir.remove() + + def __del__(self): + self._removetemp() + +class Result(object): + def __init__(self, exitstatus, signal, retval, stdout, stderr): + self.exitstatus = exitstatus + self.signal = signal + self.retval = retval + self.out = stdout + self.err = stderr --- /dev/null +++ b/_py/cmdline/pycleanup.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python + +"""\ +py.cleanup [PATH] + +Delete pyc file recursively, starting from PATH (which defaults to the current +working directory). Don't follow links and don't recurse into directories with +a ".". 
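A usage sketch for ForkedFunc above, assuming it is exposed as py.process.ForkedFunc (POSIX only, since it relies on os.fork):

    import py

    def compute():
        print("working in the child")
        return 6 * 7

    ff = py.process.ForkedFunc(compute)     # forks immediately
    result = ff.waitfinish()
    assert result.retval == 42
    assert "working in the child" in result.out
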
+""" +import py + +def main(): + parser = py.std.optparse.OptionParser(usage=__doc__) + parser.add_option("-e", "--remove", dest="ext", default=".pyc", action="store", + help="remove files with the given comma-separated list of extensions" + ) + parser.add_option("-n", "--dryrun", dest="dryrun", default=False, + action="store_true", + help="display would-be-removed filenames" + ) + (options, args) = parser.parse_args() + if not args: + args = ["."] + ext = options.ext.split(",") + def shouldremove(p): + return p.ext in ext + + for arg in args: + path = py.path.local(arg) + py.builtin.print_("cleaning path", path, "of extensions", ext) + for x in path.visit(shouldremove, lambda x: x.check(dotfile=0, link=0)): + if options.dryrun: + py.builtin.print_("would remove", x) + else: + py.builtin.print_("removing", x) + x.remove() --- /dev/null +++ b/_py/compat/dep_subprocess.py @@ -0,0 +1,4 @@ + +import py +py.log._apiwarn("1.1", "py.compat.subprocess deprecated, use standard library version.", stacklevel="initpkg") +subprocess = py.std.subprocess --- /dev/null +++ b/_py/cmdline/pywhich.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python + +"""\ +py.which [name] + +print the location of the given python module or package name +""" + +import sys + +def main(): + name = sys.argv[1] + try: + mod = __import__(name) + except ImportError: + sys.stderr.write("could not import: " + name + "\n") + else: + try: + location = mod.__file__ + except AttributeError: + sys.stderr.write("module (has no __file__): " + str(mod)) + else: + print(location) --- /dev/null +++ b/_py/path/local.py @@ -0,0 +1,789 @@ +""" +local path implementation. +""" +import sys, os, stat, re, atexit +import py +from _py.path import common + +iswin32 = sys.platform == "win32" + +class Stat(object): + def __getattr__(self, name): + return getattr(self._osstatresult, "st_" + name) + + def __init__(self, path, osstatresult): + self.path = path + self._osstatresult = osstatresult + + def owner(self): + if iswin32: + raise NotImplementedError("XXX win32") + import pwd + entry = py.error.checked_call(pwd.getpwuid, self.uid) + return entry[0] + owner = property(owner, None, None, "owner of path") + + def group(self): + """ return group name of file. """ + if iswin32: + raise NotImplementedError("XXX win32") + import grp + entry = py.error.checked_call(grp.getgrgid, self.gid) + return entry[0] + group = property(group) + +class PosixPath(common.PathBase): + def chown(self, user, group, rec=0): + """ change ownership to the given user and group. + user and group may be specified by a number or + by a name. if rec is True change ownership + recursively. + """ + uid = getuserid(user) + gid = getgroupid(group) + if rec: + for x in self.visit(rec=lambda x: x.check(link=0)): + if x.check(link=0): + py.error.checked_call(os.chown, str(x), uid, gid) + py.error.checked_call(os.chown, str(self), uid, gid) + + def readlink(self): + """ return value of a symbolic link. """ + return py.error.checked_call(os.readlink, self.strpath) + + def mklinkto(self, oldname): + """ posix style hard link to another name. """ + py.error.checked_call(os.link, str(oldname), str(self)) + + def mksymlinkto(self, value, absolute=1): + """ create a symbolic link with the given value (pointing to another name). 
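A sketch of the posix-only helpers above (hard links and the Stat wrapper); py.path.local is the public name of LocalPath:

    import py

    tmp = py.path.local.mkdtemp()
    data = tmp.ensure("data.txt")
    alias = tmp.join("alias.txt")
    alias.mklinkto(data)          # hard link: alias.txt -> existing data.txt
    print(data.stat().owner)      # resolved via the pwd module (POSIX only)
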
""" + if absolute: + py.error.checked_call(os.symlink, str(value), self.strpath) + else: + base = self.common(value) + # with posix local paths '/' is always a common base + relsource = self.__class__(value).relto(base) + reldest = self.relto(base) + n = reldest.count(self.sep) + target = self.sep.join(('..', )*n + (relsource, )) + py.error.checked_call(os.symlink, target, self.strpath) + + def samefile(self, other): + """ return True if other refers to the same stat object as self. """ + return py.std.os.path.samefile(str(self), str(other)) + +def getuserid(user): + import pwd + if not isinstance(user, int): + user = pwd.getpwnam(user)[2] + return user + +def getgroupid(group): + import grp + if not isinstance(group, int): + group = grp.getgrnam(group)[2] + return group + +FSBase = not iswin32 and PosixPath or common.PathBase + +class LocalPath(FSBase): + """ object oriented interface to os.path and other local filesystem + related information. + """ + sep = os.sep + class Checkers(common.Checkers): + def _stat(self): + try: + return self._statcache + except AttributeError: + try: + self._statcache = self.path.stat() + except py.error.ELOOP: + self._statcache = self.path.lstat() + return self._statcache + + def dir(self): + return stat.S_ISDIR(self._stat().mode) + + def file(self): + return stat.S_ISREG(self._stat().mode) + + def exists(self): + return self._stat() + + def link(self): + st = self.path.lstat() + return stat.S_ISLNK(st.mode) + + def __new__(cls, path=None): + """ Initialize and return a local Path instance. + + Path can be relative to the current directory. + If it is None then the current working directory is taken. + Note that Path instances always carry an absolute path. + Note also that passing in a local path object will simply return + the exact same path object. Use new() to get a new copy. + """ + if isinstance(path, common.PathBase): + if path.__class__ == cls: + return path + path = path.strpath + # initialize the path + self = object.__new__(cls) + if not path: + self.strpath = os.getcwd() + elif isinstance(path, py.builtin._basestring): + self.strpath = os.path.abspath(os.path.normpath(str(path))) + else: + raise ValueError("can only pass None, Path instances " + "or non-empty strings to LocalPath") + assert isinstance(self.strpath, str) + return self + + def __hash__(self): + return hash(self.strpath) + + def __eq__(self, other): + s1 = str(self) + s2 = str(other) + if iswin32: + s1 = s1.lower() + s2 = s2.lower() + return s1 == s2 + + def __ne__(self, other): + return not (self == other) + + def __lt__(self, other): + return str(self) < str(other) + + def remove(self, rec=1): + """ remove a file or directory (or a directory tree if rec=1). """ + if self.check(dir=1, link=0): + if rec: + # force remove of readonly files on windows + if iswin32: + self.chmod(448, rec=1) # octcal 0700 + py.error.checked_call(py.std.shutil.rmtree, self.strpath) + else: + py.error.checked_call(os.rmdir, self.strpath) + else: + if iswin32: + self.chmod(448) # octcal 0700 + py.error.checked_call(os.remove, self.strpath) + + def computehash(self, hashtype="md5", chunksize=524288): + """ return hexdigest of hashvalue for this file. 
""" + try: + try: + import hashlib as mod + except ImportError: + if hashtype == "sha1": + hashtype = "sha" + mod = __import__(hashtype) + hash = getattr(mod, hashtype)() + except (AttributeError, ImportError): + raise ValueError("Don't know how to compute %r hash" %(hashtype,)) + f = self.open('rb') + try: + while 1: + buf = f.read(chunksize) + if not buf: + return hash.hexdigest() + hash.update(buf) + finally: + f.close() + + def new(self, **kw): + """ create a modified version of this path. + the following keyword arguments modify various path parts: + + a:/some/path/to/a/file.ext + || drive + |-------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + obj = object.__new__(self.__class__) + drive, dirname, basename, purebasename,ext = self._getbyspec( + "drive,dirname,basename,purebasename,ext") + if 'basename' in kw: + if 'purebasename' in kw or 'ext' in kw: + raise ValueError("invalid specification %r" % kw) + else: + pb = kw.setdefault('purebasename', purebasename) + try: + ext = kw['ext'] + except KeyError: + pass + else: + if ext and not ext.startswith('.'): + ext = '.' + ext + kw['basename'] = pb + ext + + kw.setdefault('drive', drive) + kw.setdefault('dirname', dirname) + kw.setdefault('sep', self.sep) + obj.strpath = os.path.normpath( + "%(drive)s%(dirname)s%(sep)s%(basename)s" % kw) + return obj + + def _getbyspec(self, spec): + """ return a sequence of specified path parts. 'spec' is + a comma separated string containing path part names. + according to the following convention: + a:/some/path/to/a/file.ext + || drive + |-------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + res = [] + parts = self.strpath.split(self.sep) + + args = filter(None, spec.split(',') ) + append = res.append + for name in args: + if name == 'drive': + append(parts[0]) + elif name == 'dirname': + append(self.sep.join(['']+parts[1:-1])) + else: + basename = parts[-1] + if name == 'basename': + append(basename) + else: + i = basename.rfind('.') + if i == -1: + purebasename, ext = basename, '' + else: + purebasename, ext = basename[:i], basename[i:] + if name == 'purebasename': + append(purebasename) + elif name == 'ext': + append(ext) + else: + raise ValueError("invalid part specification %r" % name) + return res + + def join(self, *args, **kwargs): + """ return a new path by appending all 'args' as path + components. if abs=1 is used restart from root if any + of the args is an absolute path. + """ + if not args: + return self + strpath = self.strpath + sep = self.sep + strargs = [str(x) for x in args] + if kwargs.get('abs', 0): + for i in range(len(strargs)-1, -1, -1): + if os.path.isabs(strargs[i]): + strpath = strargs[i] + strargs = strargs[i+1:] + break + for arg in strargs: + arg = arg.strip(sep) + if iswin32: + # allow unix style paths even on windows. + arg = arg.strip('/') + arg = arg.replace('/', sep) + if arg: + if not strpath.endswith(sep): + strpath += sep + strpath += arg + obj = self.new() + obj.strpath = os.path.normpath(strpath) + return obj + + def open(self, mode='r'): + """ return an opened file with the given mode. """ + return py.error.checked_call(open, self.strpath, mode) + + def listdir(self, fil=None, sort=None): + """ list directory contents, possibly filter by the given fil func + and possibly sorted. 
+ """ + if isinstance(fil, str): + fil = common.FNMatcher(fil) + res = [] + for name in py.error.checked_call(os.listdir, self.strpath): + childurl = self.join(name) + if fil is None or fil(childurl): + res.append(childurl) + self._sortlist(res, sort) + return res + + def size(self): + """ return size of the underlying file object """ + return self.stat().size + + def mtime(self): + """ return last modification time of the path. """ + return self.stat().mtime + + def copy(self, target, archive=False): + """ copy path to target.""" + assert not archive, "XXX archive-mode not supported" + if self.check(file=1): + if target.check(dir=1): + target = target.join(self.basename) + assert self!=target + copychunked(self, target) + else: + def rec(p): + return p.check(link=0) + for x in self.visit(rec=rec): + relpath = x.relto(self) + newx = target.join(relpath) + newx.dirpath().ensure(dir=1) + if x.check(link=1): + newx.mksymlinkto(x.readlink()) + elif x.check(file=1): + copychunked(x, newx) + elif x.check(dir=1): + newx.ensure(dir=1) + + def rename(self, target): + """ rename this path to target. """ + return py.error.checked_call(os.rename, str(self), str(target)) + + def dump(self, obj, bin=1): + """ pickle object into path location""" + f = self.open('wb') + try: + py.error.checked_call(py.std.pickle.dump, obj, f, bin) + finally: + f.close() + + def mkdir(self, *args): + """ create & return the directory joined with args. """ + p = self.join(*args) + py.error.checked_call(os.mkdir, str(p)) + return p + + def write(self, data, mode='w'): + """ write data into path. """ + if 'b' in mode: + if not py.builtin._isbytes(data): + raise ValueError("can only process bytes") + else: + if not py.builtin._istext(data): + if not py.builtin._isbytes(data): + data = str(data) + else: + data = py.builtin._totext(data, sys.getdefaultencoding()) + f = self.open(mode) + try: + f.write(data) + finally: + f.close() + + def _ensuredirs(self): + parent = self.dirpath() + if parent == self: + return self + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + try: + self.mkdir() + except py.error.EEXIST: + # race condition: file/dir created by another thread/process. + # complain if it is not a dir + if self.check(dir=0): + raise + return self + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if kwargs.get('dir', 0): + return p._ensuredirs() + else: + p.dirpath()._ensuredirs() + if not p.check(file=1): + p.open('w').close() + return p + + def stat(self): + """ Return an os.stat() tuple. """ + return Stat(self, py.error.checked_call(os.stat, self.strpath)) + + def lstat(self): + """ Return an os.lstat() tuple. """ + return Stat(self, py.error.checked_call(os.lstat, self.strpath)) + + def setmtime(self, mtime=None): + """ set modification time for the given path. if 'mtime' is None + (the default) then the file's mtime is set to current time. + + Note that the resolution for 'mtime' is platform dependent. 
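A sketch of the filesystem-modifying helpers above; everything happens inside a fresh temporary directory:

    import py

    tmp = py.path.local.mkdtemp()
    cfg = tmp.ensure("etc", "app.cfg")   # creates intermediate dirs and an empty file
    cfg.write("answer = 42\n")
    backup = tmp.ensure("backup", dir=1)
    cfg.copy(backup)                     # keeps the basename inside the target dir
    assert backup.join("app.cfg").read() == "answer = 42\n"
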
+ """ + if mtime is None: + return py.error.checked_call(os.utime, self.strpath, mtime) + try: + return py.error.checked_call(os.utime, self.strpath, (-1, mtime)) + except py.error.EINVAL: + return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime)) + + def chdir(self): + """ change directory to self and return old current directory """ + old = self.__class__() + py.error.checked_call(os.chdir, self.strpath) + return old + + def realpath(self): + """ return a new path which contains no symbolic links.""" + return self.__class__(os.path.realpath(self.strpath)) + + def atime(self): + """ return last access time of the path. """ + return self.stat().atime + + def __repr__(self): + return 'local(%r)' % self.strpath + + def __str__(self): + """ return string representation of the Path. """ + return self.strpath + + def pypkgpath(self, pkgname=None): + """ return the path's package path by looking for the given + pkgname. If pkgname is None then look for the last + directory upwards which still contains an __init__.py. + Return None if a pkgpath can not be determined. + """ + pkgpath = None + for parent in self.parts(reverse=True): + if pkgname is None: + if parent.check(file=1): + continue + if parent.join('__init__.py').check(): + pkgpath = parent + continue + return pkgpath + else: + if parent.basename == pkgname: + return parent + return pkgpath + + def _prependsyspath(self, path): + s = str(path) + if s != sys.path[0]: + #print "prepending to sys.path", s + sys.path.insert(0, s) + + def chmod(self, mode, rec=0): + """ change permissions to the given mode. If mode is an + integer it directly encodes the os-specific modes. + if rec is True perform recursively. + """ + if not isinstance(mode, int): + raise TypeError("mode %r must be an integer" % (mode,)) + if rec: + for x in self.visit(rec=rec): + py.error.checked_call(os.chmod, str(x), mode) + py.error.checked_call(os.chmod, str(self), mode) + + def pyimport(self, modname=None, ensuresyspath=True): + """ return path as an imported python module. + if modname is None, look for the containing package + and construct an according module name. + The module will be put/looked up in sys.modules. + """ + if not self.check(): + raise py.error.ENOENT(self) + #print "trying to import", self + pkgpath = None + if modname is None: + pkgpath = self.pypkgpath() + if pkgpath is not None: + if ensuresyspath: + self._prependsyspath(pkgpath.dirpath()) + pkg = __import__(pkgpath.basename, None, None, []) + names = self.new(ext='').relto(pkgpath.dirpath()) + names = names.split(self.sep) + modname = ".".join(names) + else: + # no package scope, still make it possible + if ensuresyspath: + self._prependsyspath(self.dirpath()) + modname = self.purebasename + mod = __import__(modname, None, None, ['__doc__']) + #self._module = mod + return mod + else: + try: + return sys.modules[modname] + except KeyError: + # we have a custom modname, do a pseudo-import + mod = py.std.types.ModuleType(modname) + mod.__file__ = str(self) + sys.modules[modname] = mod + try: + py.builtin.execfile(str(self), mod.__dict__) + except: + del sys.modules[modname] + raise + return mod + + def sysexec(self, *argv): + """ return stdout text from executing a system child process, + where the 'self' path points to executable. + The process is directly invoked and not through a system shell. 
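pyimport() above turns a .py path into an imported module; a sketch with made-up module content:

    import py

    tmp = py.path.local.mkdtemp()
    modfile = tmp.join("helper.py")
    modfile.write("value = 42\n")
    mod = modfile.pyimport()         # imported as module 'helper'
    assert mod.value == 42
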
+ """ + from subprocess import Popen, PIPE + argv = map(str, argv) + proc = Popen([str(self)] + list(argv), stdout=PIPE, stderr=PIPE) + stdout, stderr = proc.communicate() + ret = proc.wait() + if py.builtin._isbytes(stdout): + stdout = py.builtin._totext(stdout, sys.getdefaultencoding()) + if ret != 0: + if py.builtin._isbytes(stderr): + stderr = py.builtin._totext(stderr, sys.getdefaultencoding()) + raise py.process.cmdexec.Error(ret, ret, str(self), + stdout, stderr,) + return stdout + + def sysfind(cls, name, checker=None): + """ return a path object found by looking at the systems + underlying PATH specification. If the checker is not None + it will be invoked to filter matching paths. If a binary + cannot be found, None is returned + Note: This is probably not working on plain win32 systems + but may work on cygwin. + """ + if os.path.isabs(name): + p = py.path.local(name) + if p.check(file=1): + return p + else: + if iswin32: + paths = py.std.os.environ['Path'].split(';') + if '' not in paths and '.' not in paths: + paths.append('.') + try: + systemroot = os.environ['SYSTEMROOT'] + except KeyError: + pass + else: + paths = [re.sub('%SystemRoot%', systemroot, path) + for path in paths] + tryadd = '', '.exe', '.com', '.bat' # XXX add more? + else: + paths = py.std.os.environ['PATH'].split(':') + tryadd = ('',) + + for x in paths: + for addext in tryadd: + p = py.path.local(x).join(name, abs=True) + addext + try: + if p.check(file=1): + if checker: + if not checker(p): + continue + return p + except py.error.EACCES: + pass + return None + sysfind = classmethod(sysfind) + + def _gethomedir(cls): + try: + x = os.environ['HOME'] + except KeyError: + x = os.environ['HOMEPATH'] + return cls(x) + _gethomedir = classmethod(_gethomedir) + + #""" + #special class constructors for local filesystem paths + #""" + def get_temproot(cls): + """ return the system's temporary directory + (where tempfiles are usually created in) + """ + return py.path.local(py.std.tempfile.gettempdir()) + get_temproot = classmethod(get_temproot) + + def mkdtemp(cls): + """ return a Path object pointing to a fresh new temporary directory + (which we created ourself). + """ + import tempfile + tries = 10 + for i in range(tries): + dname = tempfile.mktemp() + dpath = cls(tempfile.mktemp()) + try: + dpath.mkdir() + except (py.error.EEXIST, py.error.EPERM, py.error.EACCES): + continue + return dpath + raise py.error.ENOENT(dpath, "could not create tempdir, %d tries" % tries) + mkdtemp = classmethod(mkdtemp) + + def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3, + lock_timeout = 172800): # two days + """ return unique directory with a number greater than the current + maximum one. The number is assumed to start directly after prefix. + if keep is true directories with a number less than (maxnum-keep) + will be removed. + """ + if rootdir is None: + rootdir = cls.get_temproot() + + def parse_num(path): + """ parse the number out of a path (if it matches the prefix) """ + bn = path.basename + if bn.startswith(prefix): + try: + return int(bn[len(prefix):]) + except ValueError: + pass + + # compute the maximum number currently in use with the + # prefix + lastmax = None + while True: + maxnum = -1 + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None: + maxnum = max(maxnum, num) + + # make the new directory + try: + udir = rootdir.mkdir(prefix + str(maxnum+1)) + except py.error.EEXIST: + # race condition: another thread/process created the dir + # in the meantime. 
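sysfind() and sysexec() above locate and run external programs; a minimal sketch:

    import py

    python = py.path.local.sysfind("python")        # search PATH, None if not found
    if python is not None:
        out = python.sysexec("-c", "print(6 * 7)")  # run directly, without a shell
        assert out.strip() == "42"
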
Try counting again + if lastmax == maxnum: + raise + lastmax = maxnum + continue + break + + # put a .lock file in the new directory that will be removed at + # process exit + if lock_timeout: + lockfile = udir.join('.lock') + mypid = os.getpid() + if hasattr(lockfile, 'mksymlinkto'): + lockfile.mksymlinkto(str(mypid)) + else: + lockfile.write(str(mypid)) + def try_remove_lockfile(): + # in a fork() situation, only the last process should + # remove the .lock, otherwise the other processes run the + # risk of seeing their temporary dir disappear. For now + # we remove the .lock in the parent only (i.e. we assume + # that the children finish before the parent). + if os.getpid() != mypid: + return + try: + lockfile.remove() + except py.error.Error: + pass + atexit.register(try_remove_lockfile) + + # prune old directories + if keep: + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None and num <= (maxnum - keep): + lf = path.join('.lock') + try: + t1 = lf.lstat().mtime + t2 = lockfile.lstat().mtime + if not lock_timeout or abs(t2-t1) < lock_timeout: + continue # skip directories still locked + except py.error.Error: + pass # assume that it means that there is no 'lf' + try: + path.remove(rec=1) + except KeyboardInterrupt: + raise + except: # this might be py.error.Error, WindowsError ... + pass + + # make link... + try: + username = os.environ['USER'] #linux, et al + except KeyError: + try: + username = os.environ['USERNAME'] #windows + except KeyError: + username = 'current' + + src = str(udir) + dest = src[:src.rfind('-')] + '-' + username + try: + os.unlink(dest) + except OSError: + pass + try: + os.symlink(src, dest) + except (OSError, AttributeError): # AttributeError on win32 + pass + + return udir + make_numbered_dir = classmethod(make_numbered_dir) + +def copychunked(src, dest): + chunksize = 524288 # half a meg of bytes + fsrc = src.open('rb') + try: + fdest = dest.open('wb') + try: + while 1: + buf = fsrc.read(chunksize) + if not buf: + break + fdest.write(buf) + finally: + fdest.close() + finally: + fsrc.close() + +def autopath(globs=None): + """ (deprecated) return the (local) path of the "current" file pointed to by globals or - if it is none - alternatively the callers frame globals. + + the path will always point to a .py file or to None. + the path will have the following payload: + pkgdir is the last parent directory path containing __init__.py + """ + py.log._apiwarn("1.1", "py.magic.autopath deprecated, " + "use py.path.local(__file__) and maybe pypkgpath/pyimport().") + if globs is None: + globs = sys._getframe(1).f_globals + try: + __file__ = globs['__file__'] + except KeyError: + if not sys.argv[0]: + raise ValueError("cannot compute autopath in interactive mode") + __file__ = os.path.abspath(sys.argv[0]) + + ret = py.path.local(__file__) + if ret.ext in ('.pyc', '.pyo'): + ret = ret.new(ext='.py') + current = pkgdir = ret.dirpath() + while 1: + if current.join('__init__.py').check(): + pkgdir = current + current = current.dirpath() + if pkgdir != current: + continue + elif str(current) not in sys.path: + sys.path.insert(0, str(current)) + break + ret.pkgdir = pkgdir + return ret + --- /dev/null +++ b/_py/compat/__init__.py @@ -0,0 +1,2 @@ +""" compatibility modules (taken from 2.4.4) """ + --- /dev/null +++ b/_py/cmdline/__init__.py @@ -0,0 +1,1 @@ +# --- /dev/null +++ b/_py/cmdline/pysvnwcrevert.py @@ -0,0 +1,55 @@ +#! 
/usr/bin/env python +"""\ +py.svnwcrevert [options] WCPATH + +Running this script and then 'svn up' puts the working copy WCPATH in a state +as clean as a fresh check-out. + +WARNING: you'll loose all local changes, obviously! + +This script deletes all files that have been modified +or that svn doesn't explicitly know about, including svn:ignored files +(like .pyc files, hint hint). + +The goal of this script is to leave the working copy with some files and +directories possibly missing, but - most importantly - in a state where +the following 'svn up' won't just crash. +""" + +import sys, py + +def kill(p, root): + print('< %s' % (p.relto(root),)) + p.remove(rec=1) + +def svnwcrevert(path, root=None, precious=[]): + if root is None: + root = path + wcpath = py.path.svnwc(path) + try: + st = wcpath.status() + except ValueError: # typically, "bad char in wcpath" + kill(path, root) + return + for p in path.listdir(): + if p.basename == '.svn' or p.basename in precious: + continue + wcp = py.path.svnwc(p) + if wcp not in st.unchanged and wcp not in st.external: + kill(p, root) + elif p.check(dir=1): + svnwcrevert(p, root) + +# XXX add a functional test + +parser = py.std.optparse.OptionParser(usage=__doc__) +parser.add_option("-p", "--precious", + action="append", dest="precious", default=[], + help="preserve files with this name") + +def main(): + opts, args = parser.parse_args() + if len(args) != 1: + parser.print_help() + sys.exit(2) + svnwcrevert(py.path.local(args[0]), precious=opts.precious) --- /dev/null +++ b/_py/compat/dep_doctest.py @@ -0,0 +1,4 @@ +import py + +py.log._apiwarn("1.1", "py.compat.doctest deprecated, use standard library version.", stacklevel="initpkg") +doctest = py.std.doctest --- /dev/null +++ b/_py/path/gateway/channeltest2.py @@ -0,0 +1,21 @@ +import py +from remotepath import RemotePath + + +SRC = open('channeltest.py', 'r').read() + +SRC += ''' +import py +srv = PathServer(channel.receive()) +channel.send(srv.p2c(py.path.local("/tmp"))) +''' + + +#gw = execnet.SshGateway('codespeak.net') +gw = execnet.PopenGateway() +gw.remote_init_threads(5) +c = gw.remote_exec(SRC, stdout=py.std.sys.stdout, stderr=py.std.sys.stderr) +subchannel = gw._channelfactory.new() +c.send(subchannel) + +p = RemotePath(subchannel, c.receive()) --- /dev/null +++ b/_py/path/gateway/channeltest.py @@ -0,0 +1,65 @@ +import threading + + +class PathServer: + + def __init__(self, channel): + self.channel = channel + self.C2P = {} + self.next_id = 0 + threading.Thread(target=self.serve).start() + + def p2c(self, path): + id = self.next_id + self.next_id += 1 + self.C2P[id] = path + return id + + def command_LIST(self, id, *args): + path = self.C2P[id] + answer = [(self.p2c(p), p.basename) for p in path.listdir(*args)] + self.channel.send(answer) + + def command_DEL(self, id): + del self.C2P[id] + + def command_GET(self, id, spec): + path = self.C2P[id] + self.channel.send(path._getbyspec(spec)) + + def command_READ(self, id): + path = self.C2P[id] + self.channel.send(path.read()) + + def command_JOIN(self, id, resultid, *args): + path = self.C2P[id] + assert resultid not in self.C2P + self.C2P[resultid] = path.join(*args) + + def command_DIRPATH(self, id, resultid): + path = self.C2P[id] + assert resultid not in self.C2P + self.C2P[resultid] = path.dirpath() + + def serve(self): + try: + while 1: + msg = self.channel.receive() + meth = getattr(self, 'command_' + msg[0]) + meth(*msg[1:]) + except EOFError: + pass + +if __name__ == '__main__': + import py + gw = execnet.PopenGateway() + channel 
= gw._channelfactory.new() + srv = PathServer(channel) + c = gw.remote_exec(""" + import remotepath + p = remotepath.RemotePath(channel.receive(), channel.receive()) + channel.send(len(p.listdir())) + """) + c.send(channel) + c.send(srv.p2c(py.path.local('/tmp'))) + print(c.receive()) --- /dev/null +++ b/_py/io/capture.py @@ -0,0 +1,344 @@ +import os +import sys +import py +import tempfile + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +if sys.version_info < (3,0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + data = unicode(data, getattr(self, '_encoding', 'UTF-8')) + StringIO.write(self, data) +else: + TextIO = StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" %(data,)) + StringIO.write(self, data) + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. + """ + self.targetfd = targetfd + if tmpfile is None: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(targetfd) + os.dup2(self.tmpfile.fileno(), targetfd) + self._patched = [] + + def setasfile(self, name, module=sys): + """ patch . to self.tmpfile + """ + key = (module, name) + self._patched.append((key, getattr(module, name))) + setattr(module, name, self.tmpfile) + + def unsetfiles(self): + """ unpatch all patched items + """ + while self._patched: + (module, name), value = self._patched.pop() + setattr(module, name, value) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + self.unsetfiles() + os.close(self._savefd) + self.tmpfile.seek(0) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + mode = mode and mode or f.mode + if sys.version_info >= (3,0): + if encoding is not None: + mode = mode.replace("b", "") + buffering = True + return os.fdopen(newfd, mode, buffering, encoding, closefd=False) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + elif isinstance(obj, str): + pass + else: + obj = str(obj) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return 
getattr(self._stream, name) + +class Capture(object): + def call(cls, func, *args, **kwargs): + """ return a (res, out, err) tuple where + out and err represent the output/error output + during function execution. + call the given function with args/kwargs + and capture output/error during its execution. + """ + so = cls() + try: + res = func(*args, **kwargs) + finally: + out, err = so.reset() + return res, out, err + call = classmethod(call) + + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. """ + if hasattr(self, '_suspended'): + outfile = self._kwargs['out'] + errfile = self._kwargs['err'] + del self._kwargs + else: + outfile, errfile = self.done() + out, err = "", "" + if outfile: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + assert not hasattr(self, '_suspended') + self._suspended = True + outerr = self.readouterr() + outfile, errfile = self.done() + self._kwargs['out'] = outfile + self._kwargs['err'] = errfile + return outerr + + def resume(self): + """ resume capturing with original temp files. """ + assert self._suspended + self._initialize(**self._kwargs) + del self._suspended + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin) + """ + def __init__(self, out=True, err=True, + mixed=False, in_=True, patchsys=True): + self._kwargs = locals().copy() + del self._kwargs['self'] + self._initialize(**self._kwargs) + + def _initialize(self, out=True, err=True, + mixed=False, in_=True, patchsys=True): + if in_: + self._oldin = (sys.stdin, os.dup(0)) + sys.stdin = DontReadFromInput() + fd = os.open(devnullpath, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + self.out = py.io.FDCapture(1, tmpfile=tmpfile) + if patchsys: + self.out.setasfile('stdout') + if err: + if mixed and out: + tmpfile = self.out.tmpfile + elif hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + self.err = py.io.FDCapture(2, tmpfile=tmpfile) + if patchsys: + self.err.setasfile('stderr') + + def done(self): + """ return (outfile, errfile) and stop capturing. """ + if hasattr(self, 'out'): + outfile = self.out.done() + else: + outfile = None + if hasattr(self, 'err'): + errfile = self.err.done() + else: + errfile = None + if hasattr(self, '_oldin'): + oldsys, oldfd = self._oldin + os.dup2(oldfd, 0) + os.close(oldfd) + sys.stdin = oldsys + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + l = [] + for name in ('out', 'err'): + res = "" + if hasattr(self, name): + f = getattr(self, name).tmpfile + f.seek(0) + res = f.read() + f.truncate(0) + f.seek(0) + l.append(res) + return l + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). 
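A sketch of the in-memory capturing class above, assuming it is exposed as py.io.StdCapture (StdCaptureFD works the same way but at the file-descriptor level):

    import sys
    import py

    cap = py.io.StdCapture()     # swaps in in-memory files for stdout/stderr/stdin
    print("hello")
    sys.stderr.write("oops\n")
    out, err = cap.reset()
    assert out == "hello\n" and err == "oops\n"
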
+ """ + def __init__(self, out=True, err=True, in_=True, mixed=False): + self._kwargs = locals().copy() + del self._kwargs['self'] + self._initialize(**self._kwargs) + + def _initialize(self, out, err, in_, mixed): + self._out = out + self._err = err + self._in = in_ + if out: + self._oldout = sys.stdout + if not hasattr(out, 'write'): + out = TextIO() + sys.stdout = self.out = out + if err: + self._olderr = sys.stderr + if out and mixed: + err = self.out + elif not hasattr(err, 'write'): + err = TextIO() + sys.stderr = self.err = err + if in_: + self._oldin = sys.stdin + sys.stdin = self.newin = DontReadFromInput() + + def done(self): + """ return (outfile, errfile) and stop capturing. """ + o,e = sys.stdout, sys.stderr + if self._out: + try: + sys.stdout = self._oldout + except AttributeError: + raise IOError("stdout capturing already reset") + del self._oldout + outfile = self.out + outfile.seek(0) + else: + outfile = None + if self._err: + try: + sys.stderr = self._olderr + except AttributeError: + raise IOError("stderr capturing already reset") + del self._olderr + errfile = self.err + errfile.seek(0) + else: + errfile = None + if self._in: + sys.stdin = self._oldin + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + out = err = "" + if self._out: + out = sys.stdout.getvalue() + sys.stdout.truncate(0) + if self._err: + err = sys.stderr.getvalue() + sys.stderr.truncate(0) + return out, err + +class DontReadFromInput: + """Temporary stub class. Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + def isatty(self): + return False + +try: + devnullpath = os.devnull +except AttributeError: + if os.name == 'nt': + devnullpath = 'NUL' + else: + devnullpath = '/dev/null' + + --- /dev/null +++ b/_py/code/oldmagic2.py @@ -0,0 +1,6 @@ + +import py + +py.log._apiwarn("1.1", "py.magic.AssertionError is deprecated, use py.code._AssertionError", stacklevel=2) + +from py.code import _AssertionError as AssertionError --- /dev/null +++ b/_py/path/cacheutil.py @@ -0,0 +1,111 @@ +""" +This module contains multithread-safe cache implementations. + +All Caches have + + getorbuild(key, builder) + delentry(key) + +methods and allow configuration when instantiating the cache class. +""" +from time import time as gettime + +class BasicCache(object): + def __init__(self, maxentries=128): + self.maxentries = maxentries + self.prunenum = int(maxentries - maxentries/8) + self._dict = {} + + def _getentry(self, key): + return self._dict[key] + + def _putentry(self, key, entry): + self._prunelowestweight() + self._dict[key] = entry + + def delentry(self, key, raising=False): + try: + del self._dict[key] + except KeyError: + if raising: + raise + + def getorbuild(self, key, builder): + try: + entry = self._getentry(key) + except KeyError: + entry = self._build(key, builder) + self._putentry(key, entry) + return entry.value + + def _prunelowestweight(self): + """ prune out entries with lowest weight. 
""" + numentries = len(self._dict) + if numentries >= self.maxentries: + # evict according to entry's weight + items = [(entry.weight, key) + for key, entry in self._dict.items()] + items.sort() + index = numentries - self.prunenum + if index > 0: + for weight, key in items[:index]: + # in MT situations the element might be gone + self.delentry(key, raising=False) + +class BuildcostAccessCache(BasicCache): + """ A BuildTime/Access-counting cache implementation. + the weight of a value is computed as the product of + + num-accesses-of-a-value * time-to-build-the-value + + The values with the least such weights are evicted + if the cache maxentries threshold is superceded. + For implementation flexibility more than one object + might be evicted at a time. + """ + # time function to use for measuring build-times + + def _build(self, key, builder): + start = gettime() + val = builder() + end = gettime() + return WeightedCountingEntry(val, end-start) + + +class WeightedCountingEntry(object): + def __init__(self, value, oneweight): + self._value = value + self.weight = self._oneweight = oneweight + + def value(self): + self.weight += self._oneweight + return self._value + value = property(value) + +class AgingCache(BasicCache): + """ This cache prunes out cache entries that are too old. + """ + def __init__(self, maxentries=128, maxseconds=10.0): + super(AgingCache, self).__init__(maxentries) + self.maxseconds = maxseconds + + def _getentry(self, key): + entry = self._dict[key] + if entry.isexpired(): + self.delentry(key) + raise KeyError(key) + return entry + + def _build(self, key, builder): + val = builder() + entry = AgingEntry(val, gettime() + self.maxseconds) + return entry + +class AgingEntry(object): + def __init__(self, value, expirationtime): + self.value = value + self.weight = expirationtime + + def isexpired(self): + t = gettime() + return t >= self.weight --- /dev/null +++ b/_py/path/svnwc.py @@ -0,0 +1,1236 @@ +""" +svn-Command based Implementation of a Subversion WorkingCopy Path. + + SvnWCCommandPath is the main class. + +""" + +import os, sys, time, re, calendar +import py +import subprocess +from _py.path import common + +#----------------------------------------------------------- +# Caching latest repository revision and repo-paths +# (getting them is slow with the current implementations) +# +# XXX make mt-safe +#----------------------------------------------------------- + +class cache: + proplist = {} + info = {} + entries = {} + prop = {} + +class RepoEntry: + def __init__(self, url, rev, timestamp): + self.url = url + self.rev = rev + self.timestamp = timestamp + + def __str__(self): + return "repo: %s;%s %s" %(self.url, self.rev, self.timestamp) + +class RepoCache: + """ The Repocache manages discovered repository paths + and their revisions. If inside a timeout the cache + will even return the revision of the root. 
+ """ + timeout = 20 # seconds after which we forget that we know the last revision + + def __init__(self): + self.repos = [] + + def clear(self): + self.repos = [] + + def put(self, url, rev, timestamp=None): + if rev is None: + return + if timestamp is None: + timestamp = time.time() + + for entry in self.repos: + if url == entry.url: + entry.timestamp = timestamp + entry.rev = rev + #print "set repo", entry + break + else: + entry = RepoEntry(url, rev, timestamp) + self.repos.append(entry) + #print "appended repo", entry + + def get(self, url): + now = time.time() + for entry in self.repos: + if url.startswith(entry.url): + if now < entry.timestamp + self.timeout: + #print "returning immediate Etrny", entry + return entry.url, entry.rev + return entry.url, -1 + return url, -1 + +repositories = RepoCache() + + +# svn support code + +ALLOWED_CHARS = "_ -/\\=$.~+" #add characters as necessary when tested +if sys.platform == "win32": + ALLOWED_CHARS += ":" +ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:' + +def _getsvnversion(ver=[]): + try: + return ver[0] + except IndexError: + v = py.process.cmdexec("svn -q --version") + v.strip() + v = '.'.join(v.split('.')[:2]) + ver.append(v) + return v + +def _escape_helper(text): + text = str(text) + if py.std.sys.platform != 'win32': + text = str(text).replace('$', '\\$') + return text + +def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS): + for c in str(text): + if c.isalnum(): + continue + if c in allowed_chars: + continue + return True + return False + +def checkbadchars(url): + # (hpk) not quite sure about the exact purpose, guido w.? + proto, uri = url.split("://", 1) + if proto != "file": + host, uripath = uri.split('/', 1) + # only check for bad chars in the non-protocol parts + if (_check_for_bad_chars(host, ALLOWED_CHARS_HOST) \ + or _check_for_bad_chars(uripath, ALLOWED_CHARS)): + raise ValueError("bad char in %r" % (url, )) + + +#_______________________________________________________________ + +class SvnPathBase(common.PathBase): + """ Base implementation for SvnPath implementations. """ + sep = '/' + + def _geturl(self): + return self.strpath + url = property(_geturl, None, None, "url of this svn-path.") + + def __str__(self): + """ return a string representation (including rev-number) """ + return self.strpath + + def __hash__(self): + return hash(self.strpath) + + def new(self, **kw): + """ create a modified version of this path. A 'rev' argument + indicates a new revision. + the following keyword arguments modify various path parts: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + obj = object.__new__(self.__class__) + obj.rev = kw.get('rev', self.rev) + obj.auth = kw.get('auth', self.auth) + dirname, basename, purebasename, ext = self._getbyspec( + "dirname,basename,purebasename,ext") + if 'basename' in kw: + if 'purebasename' in kw or 'ext' in kw: + raise ValueError("invalid specification %r" % kw) + else: + pb = kw.setdefault('purebasename', purebasename) + ext = kw.setdefault('ext', ext) + if ext and not ext.startswith('.'): + ext = '.' + ext + kw['basename'] = pb + ext + + kw.setdefault('dirname', dirname) + kw.setdefault('sep', self.sep) + if kw['basename']: + obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw + else: + obj.strpath = "%(dirname)s" % kw + return obj + + def _getbyspec(self, spec): + """ get specified parts of the path. 'arg' is a string + with comma separated path parts. 
The parts are returned + in exactly the order of the specification. + + you may specify the following parts: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + res = [] + parts = self.strpath.split(self.sep) + for name in spec.split(','): + name = name.strip() + if name == 'dirname': + res.append(self.sep.join(parts[:-1])) + elif name == 'basename': + res.append(parts[-1]) + else: + basename = parts[-1] + i = basename.rfind('.') + if i == -1: + purebasename, ext = basename, '' + else: + purebasename, ext = basename[:i], basename[i:] + if name == 'purebasename': + res.append(purebasename) + elif name == 'ext': + res.append(ext) + else: + raise NameError("Don't know part %r" % name) + return res + + def __eq__(self, other): + """ return true if path and rev attributes each match """ + return (str(self) == str(other) and + (self.rev == other.rev or self.rev == other.rev)) + + def __ne__(self, other): + return not self == other + + def join(self, *args): + """ return a new Path (with the same revision) which is composed + of the self Path followed by 'args' path components. + """ + if not args: + return self + + args = tuple([arg.strip(self.sep) for arg in args]) + parts = (self.strpath, ) + args + newpath = self.__class__(self.sep.join(parts), self.rev, self.auth) + return newpath + + def propget(self, name): + """ return the content of the given property. """ + value = self._propget(name) + return value + + def proplist(self): + """ list all property names. """ + content = self._proplist() + return content + + def info(self): + """ return an Info structure with svn-provided information. """ + parent = self.dirpath() + nameinfo_seq = parent._listdir_nameinfo() + bn = self.basename + for name, info in nameinfo_seq: + if name == bn: + return info + raise py.error.ENOENT(self) + + def size(self): + """ Return the size of the file content of the Path. """ + return self.info().size + + def mtime(self): + """ Return the last modification time of the file. """ + return self.info().mtime + + # shared help methods + + def _escape(self, cmd): + return _escape_helper(cmd) + + + #def _childmaxrev(self): + # """ return maximum revision number of childs (or self.rev if no childs) """ + # rev = self.rev + # for name, info in self._listdir_nameinfo(): + # rev = max(rev, info.created_rev) + # return rev + + #def _getlatestrevision(self): + # """ return latest repo-revision for this path. 
""" + # url = self.strpath + # path = self.__class__(url, None) + # + # # we need a long walk to find the root-repo and revision + # while 1: + # try: + # rev = max(rev, path._childmaxrev()) + # previous = path + # path = path.dirpath() + # except (IOError, process.cmdexec.Error): + # break + # if rev is None: + # raise IOError, "could not determine newest repo revision for %s" % self + # return rev + + class Checkers(common.Checkers): + def dir(self): + try: + return self.path.info().kind == 'dir' + except py.error.Error: + return self._listdirworks() + + def _listdirworks(self): + try: + self.path.listdir() + except py.error.ENOENT: + return False + else: + return True + + def file(self): + try: + return self.path.info().kind == 'file' + except py.error.ENOENT: + return False + + def exists(self): + try: + return self.path.info() + except py.error.ENOENT: + return self._listdirworks() + +def parse_apr_time(timestr): + i = timestr.rfind('.') + if i == -1: + raise ValueError("could not parse %s" % timestr) + timestr = timestr[:i] + parsedtime = time.strptime(timestr, "%Y-%m-%dT%H:%M:%S") + return time.mktime(parsedtime) + +class PropListDict(dict): + """ a Dictionary which fetches values (InfoSvnCommand instances) lazily""" + def __init__(self, path, keynames): + dict.__init__(self, [(x, None) for x in keynames]) + self.path = path + + def __getitem__(self, key): + value = dict.__getitem__(self, key) + if value is None: + value = self.path.propget(key) + dict.__setitem__(self, key, value) + return value + +def fixlocale(): + if sys.platform != 'win32': + return 'LC_ALL=C ' + return '' + +# some nasty chunk of code to solve path and url conversion and quoting issues +ILLEGAL_CHARS = '* | \ / : < > ? \t \n \x0b \x0c \r'.split(' ') +if os.sep in ILLEGAL_CHARS: + ILLEGAL_CHARS.remove(os.sep) +ISWINDOWS = sys.platform == 'win32' +_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I) +def _check_path(path): + illegal = ILLEGAL_CHARS[:] + sp = path.strpath + if ISWINDOWS: + illegal.remove(':') + if not _reg_allow_disk.match(sp): + raise ValueError('path may not contain a colon (:)') + for char in sp: + if char not in string.printable or char in illegal: + raise ValueError('illegal character %r in path' % (char,)) + +def path_to_fspath(path, addat=True): + _check_path(path) + sp = path.strpath + if addat and path.rev != -1: + sp = '%s@%s' % (sp, path.rev) + elif addat: + sp = '%s at HEAD' % (sp,) + return sp + +def url_from_path(path): + fspath = path_to_fspath(path, False) + quote = py.std.urllib.quote + if ISWINDOWS: + match = _reg_allow_disk.match(fspath) + fspath = fspath.replace('\\', '/') + if match.group(1): + fspath = '/%s%s' % (match.group(1).replace('\\', '/'), + quote(fspath[len(match.group(1)):])) + else: + fspath = quote(fspath) + else: + fspath = quote(fspath) + if path.rev != -1: + fspath = '%s@%s' % (fspath, path.rev) + else: + fspath = '%s at HEAD' % (fspath,) + return 'file://%s' % (fspath,) + +class SvnAuth(object): + """ container for auth information for Subversion """ + def __init__(self, username, password, cache_auth=True, interactive=True): + self.username = username + self.password = password + self.cache_auth = cache_auth + self.interactive = interactive + + def makecmdoptions(self): + uname = self.username.replace('"', '\\"') + passwd = self.password.replace('"', '\\"') + ret = [] + if uname: + ret.append('--username="%s"' % (uname,)) + if passwd: + ret.append('--password="%s"' % (passwd,)) + if not self.cache_auth: + ret.append('--no-auth-cache') + if not 
self.interactive: + ret.append('--non-interactive') + return ' '.join(ret) + + def __str__(self): + return "" %(self.username,) + +rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)') + +class SvnWCCommandPath(common.PathBase): + """ path implementation offering access/modification to svn working copies. + It has methods similar to the functions in os.path and similar to the + commands of the svn client. + """ + sep = os.sep + + def __new__(cls, wcpath=None, auth=None): + self = object.__new__(cls) + if isinstance(wcpath, cls): + if wcpath.__class__ == cls: + return wcpath + wcpath = wcpath.localpath + if _check_for_bad_chars(str(wcpath), + ALLOWED_CHARS): + raise ValueError("bad char in wcpath %s" % (wcpath, )) + self.localpath = py.path.local(wcpath) + self.auth = auth + return self + + strpath = property(lambda x: str(x.localpath), None, None, "string path") + + def __eq__(self, other): + return self.localpath == getattr(other, 'localpath', None) + + def _geturl(self): + if getattr(self, '_url', None) is None: + info = self.info() + self._url = info.url #SvnPath(info.url, info.rev) + assert isinstance(self._url, py.builtin._basestring) + return self._url + + url = property(_geturl, None, None, "url of this WC item") + + def _escape(self, cmd): + return _escape_helper(cmd) + + def dump(self, obj): + """ pickle object into path location""" + return self.localpath.dump(obj) + + def svnurl(self): + """ return current SvnPath for this WC-item. """ + info = self.info() + return py.path.svnurl(info.url) + + def __repr__(self): + return "svnwc(%r)" % (self.strpath) # , self._url) + + def __str__(self): + return str(self.localpath) + + def _makeauthoptions(self): + if self.auth is None: + return '' + return self.auth.makecmdoptions() + + def _authsvn(self, cmd, args=None): + args = args and list(args) or [] + args.append(self._makeauthoptions()) + return self._svn(cmd, *args) + + def _svn(self, cmd, *args): + l = ['svn %s' % cmd] + args = [self._escape(item) for item in args] + l.extend(args) + l.append('"%s"' % self._escape(self.strpath)) + # try fixing the locale because we can't otherwise parse + string = fixlocale() + " ".join(l) + try: + try: + key = 'LC_MESSAGES' + hold = os.environ.get(key) + os.environ[key] = 'C' + out = py.process.cmdexec(string) + finally: + if hold: + os.environ[key] = hold + else: + del os.environ[key] + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + strerr = e.err.lower() + if strerr.find('file not found') != -1: + raise py.error.ENOENT(self) + if (strerr.find('file exists') != -1 or + strerr.find('file already exists') != -1 or + strerr.find("can't create directory") != -1): + raise py.error.EEXIST(self) + raise + return out + + def switch(self, url): + """ switch to given URL. """ + self._authsvn('switch', [url]) + + def checkout(self, url=None, rev=None): + """ checkout from url to local wcpath. """ + args = [] + if url is None: + url = self.url + if rev is None or rev == -1: + if (py.std.sys.platform != 'win32' and + _getsvnversion() == '1.3'): + url += "@HEAD" + else: + if _getsvnversion() == '1.3': + url += "@%d" % rev + else: + args.append('-r' + str(rev)) + args.append(url) + self._authsvn('co', args) + + def update(self, rev='HEAD'): + """ update working copy item to given revision. (None -> HEAD). """ + self._authsvn('up', ['-r', rev, "--non-interactive"],) + + def write(self, content, mode='w'): + """ write content into local filesystem wc. 
""" + self.localpath.write(content, mode) + + def dirpath(self, *args): + """ return the directory Path of the current Path. """ + return self.__class__(self.localpath.dirpath(*args), auth=self.auth) + + def _ensuredirs(self): + parent = self.dirpath() + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + self.mkdir() + return self + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'directory=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if p.check(): + if p.check(versioned=False): + p.add() + return p + if kwargs.get('dir', 0): + return p._ensuredirs() + parent = p.dirpath() + parent._ensuredirs() + p.write("") + p.add() + return p + + def mkdir(self, *args): + """ create & return the directory joined with args. """ + if args: + return self.join(*args).mkdir() + else: + self._svn('mkdir') + return self + + def add(self): + """ add ourself to svn """ + self._svn('add') + + def remove(self, rec=1, force=1): + """ remove a file or a directory tree. 'rec'ursive is + ignored and considered always true (because of + underlying svn semantics. + """ + assert rec, "svn cannot remove non-recursively" + if not self.check(versioned=True): + # not added to svn (anymore?), just remove + py.path.local(self).remove() + return + flags = [] + if force: + flags.append('--force') + self._svn('remove', *flags) + + def copy(self, target): + """ copy path to target.""" + py.process.cmdexec("svn copy %s %s" %(str(self), str(target))) + + def rename(self, target): + """ rename this path to target. """ + py.process.cmdexec("svn move --force %s %s" %(str(self), str(target))) + + def lock(self): + """ set a lock (exclusive) on the resource """ + out = self._authsvn('lock').strip() + if not out: + # warning or error, raise exception + raise Exception(out[4:]) + + def unlock(self): + """ unset a previously set lock """ + out = self._authsvn('unlock').strip() + if out.startswith('svn:'): + # warning or error, raise exception + raise Exception(out[4:]) + + def cleanup(self): + """ remove any locks from the resource """ + # XXX should be fixed properly!!! + try: + self.unlock() + except: + pass + + def status(self, updates=0, rec=0, externals=0): + """ return (collective) Status object for this file. """ + # http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1 + # 2201 2192 jum test + # XXX + if externals: + raise ValueError("XXX cannot perform status() " + "on external items yet") + else: + #1.2 supports: externals = '--ignore-externals' + externals = '' + if rec: + rec= '' + else: + rec = '--non-recursive' + + # XXX does not work on all subversion versions + #if not externals: + # externals = '--ignore-externals' + + if updates: + updates = '-u' + else: + updates = '' + + try: + cmd = 'status -v --xml --no-ignore %s %s %s' % ( + updates, rec, externals) + out = self._authsvn(cmd) + except py.process.cmdexec.Error: + cmd = 'status -v --no-ignore %s %s %s' % ( + updates, rec, externals) + out = self._authsvn(cmd) + rootstatus = WCStatus(self).fromstring(out, self) + else: + rootstatus = XMLWCStatus(self).fromstring(out, self) + return rootstatus + + def diff(self, rev=None): + """ return a diff of the current path against revision rev (defaulting + to the last one). 
+ """ + args = [] + if rev is not None: + args.append("-r %d" % rev) + out = self._authsvn('diff', args) + return out + + def blame(self): + """ return a list of tuples of three elements: + (revision, commiter, line) + """ + out = self._svn('blame') + result = [] + blamelines = out.splitlines() + reallines = py.path.svnurl(self.url).readlines() + for i, (blameline, line) in enumerate( + zip(blamelines, reallines)): + m = rex_blame.match(blameline) + if not m: + raise ValueError("output line %r of svn blame does not match " + "expected format" % (line, )) + rev, name, _ = m.groups() + result.append((int(rev), name, line)) + return result + + _rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL) + def commit(self, msg='', rec=1): + """ commit with support for non-recursive commits """ + # XXX i guess escaping should be done better here?!? + cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),) + if not rec: + cmd += ' -N' + out = self._authsvn(cmd) + try: + del cache.info[self] + except KeyError: + pass + if out: + m = self._rex_commit.match(out) + return int(m.group(1)) + + def propset(self, name, value, *args): + """ set property name to value on this path. """ + d = py.path.local.mkdtemp() + try: + p = d.join('value') + p.write(value) + self._svn('propset', name, '--file', str(p), *args) + finally: + d.remove() + + def propget(self, name): + """ get property name on this path. """ + res = self._svn('propget', name) + return res[:-1] # strip trailing newline + + def propdel(self, name): + """ delete property name on this path. """ + res = self._svn('propdel', name) + return res[:-1] # strip trailing newline + + def proplist(self, rec=0): + """ return a mapping of property names to property values. +If rec is True, then return a dictionary mapping sub-paths to such mappings. +""" + if rec: + res = self._svn('proplist -R') + return make_recursive_propdict(self, res) + else: + res = self._svn('proplist') + lines = res.split('\n') + lines = [x.strip() for x in lines[1:]] + return PropListDict(self, lines) + + def revert(self, rec=0): + """ revert the local changes of this path. if rec is True, do so +recursively. """ + if rec: + result = self._svn('revert -R') + else: + result = self._svn('revert') + return result + + def new(self, **kw): + """ create a modified version of this path. A 'rev' argument + indicates a new revision. + the following keyword arguments modify various path parts: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + if kw: + localpath = self.localpath.new(**kw) + else: + localpath = self.localpath + return self.__class__(localpath, auth=self.auth) + + def join(self, *args, **kwargs): + """ return a new Path (with the same revision) which is composed + of the self Path followed by 'args' path components. + """ + if not args: + return self + localpath = self.localpath.join(*args, **kwargs) + return self.__class__(localpath, auth=self.auth) + + def info(self, usecache=1): + """ return an Info structure with svn-provided information. 
""" + info = usecache and cache.info.get(self) + if not info: + try: + output = self._svn('info') + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('Path is not a working copy directory') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find("is not under version control") != -1: + raise py.error.ENOENT(self, e.err) + raise + # XXX SVN 1.3 has output on stderr instead of stdout (while it does + # return 0!), so a bit nasty, but we assume no output is output + # to stderr... + if (output.strip() == '' or + output.lower().find('not a versioned resource') != -1): + raise py.error.ENOENT(self, output) + info = InfoSvnWCCommand(output) + + # Can't reliably compare on Windows without access to win32api + if py.std.sys.platform != 'win32': + if info.path != self.localpath: + raise py.error.ENOENT(self, "not a versioned resource:" + + " %s != %s" % (info.path, self.localpath)) + cache.info[self] = info + self.rev = info.rev + return info + + def listdir(self, fil=None, sort=None): + """ return a sequence of Paths. + + listdir will return either a tuple or a list of paths + depending on implementation choices. + """ + if isinstance(fil, str): + fil = common.FNMatcher(fil) + # XXX unify argument naming with LocalPath.listdir + def notsvn(path): + return path.basename != '.svn' + + paths = [self.__class__(p, auth=self.auth) + for p in self.localpath.listdir() + if notsvn(p) and (not fil or fil(p))] + self._sortlist(paths, sort) + return paths + + def open(self, mode='r'): + """ return an opened file with the given mode. """ + return open(self.strpath, mode) + + def _getbyspec(self, spec): + return self.localpath._getbyspec(spec) + + class Checkers(py.path.local.Checkers): + def __init__(self, path): + self.svnwcpath = path + self.path = path.localpath + def versioned(self): + try: + s = self.svnwcpath.info() + except (py.error.ENOENT, py.error.EEXIST): + return False + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('is not a working copy')!=-1: + return False + if e.err.lower().find('not a versioned resource') != -1: + return False + raise + else: + return True + + def log(self, rev_start=None, rev_end=1, verbose=False): + """ return a list of LogEntry instances for this path. +rev_start is the starting revision (defaulting to the first one). +rev_end is the last revision (defaulting to HEAD). +if verbose is True, then the LogEntry instances also know which files changed. 
+""" + assert self.check() # make it simpler for the pipe + rev_start = rev_start is None and "HEAD" or rev_start + rev_end = rev_end is None and "HEAD" or rev_end + if rev_start == "HEAD" and rev_end == 1: + rev_opt = "" + else: + rev_opt = "-r %s:%s" % (rev_start, rev_end) + verbose_opt = verbose and "-v" or "" + locale_env = fixlocale() + # some blather on stderr + auth_opt = self._makeauthoptions() + #stdin, stdout, stderr = os.popen3(locale_env + + # 'svn log --xml %s %s %s "%s"' % ( + # rev_opt, verbose_opt, auth_opt, + # self.strpath)) + cmd = locale_env + 'svn log --xml %s %s %s "%s"' % ( + rev_opt, verbose_opt, auth_opt, self.strpath) + + popen = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + ) + stdout, stderr = popen.communicate() + stdout = py.builtin._totext(stdout, sys.getdefaultencoding()) + minidom,ExpatError = importxml() + try: + tree = minidom.parseString(stdout) + except ExpatError: + raise ValueError('no such revision') + result = [] + for logentry in filter(None, tree.firstChild.childNodes): + if logentry.nodeType == logentry.ELEMENT_NODE: + result.append(LogEntry(logentry)) + return result + + def size(self): + """ Return the size of the file content of the Path. """ + return self.info().size + + def mtime(self): + """ Return the last modification time of the file. """ + return self.info().mtime + + def __hash__(self): + return hash((self.strpath, self.__class__, self.auth)) + + +class WCStatus: + attrnames = ('modified','added', 'conflict', 'unchanged', 'external', + 'deleted', 'prop_modified', 'unknown', 'update_available', + 'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced' + ) + + def __init__(self, wcpath, rev=None, modrev=None, author=None): + self.wcpath = wcpath + self.rev = rev + self.modrev = modrev + self.author = author + + for name in self.attrnames: + setattr(self, name, []) + + def allpath(self, sort=True, **kw): + d = {} + for name in self.attrnames: + if name not in kw or kw[name]: + for path in getattr(self, name): + d[path] = 1 + l = d.keys() + if sort: + l.sort() + return l + + # XXX a bit scary to assume there's always 2 spaces between username and + # path, however with win32 allowing spaces in user names there doesn't + # seem to be a more solid approach :( + _rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)') + + def fromstring(data, rootwcpath, rev=None, modrev=None, author=None): + """ return a new WCStatus object from data 's' + """ + rootstatus = WCStatus(rootwcpath, rev, modrev, author) + update_rev = None + for line in data.split('\n'): + if not line.strip(): + continue + #print "processing %r" % line + flags, rest = line[:8], line[8:] + # first column + c0,c1,c2,c3,c4,c5,x6,c7 = flags + #if '*' in line: + # print "flags", repr(flags), "rest", repr(rest) + + if c0 in '?XI': + fn = line.split(None, 1)[1] + if c0 == '?': + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.unknown.append(wcpath) + elif c0 == 'X': + wcpath = rootwcpath.__class__( + rootwcpath.localpath.join(fn, abs=1), + auth=rootwcpath.auth) + rootstatus.external.append(wcpath) + elif c0 == 'I': + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.ignored.append(wcpath) + + continue + + #elif c0 in '~!' 
or c4 == 'S': + # raise NotImplementedError("received flag %r" % c0) + + m = WCStatus._rex_status.match(rest) + if not m: + if c7 == '*': + fn = rest.strip() + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.update_available.append(wcpath) + continue + if line.lower().find('against revision:')!=-1: + update_rev = int(rest.split(':')[1].strip()) + continue + if line.lower().find('status on external') > -1: + # XXX not sure what to do here... perhaps we want to + # store some state instead of just continuing, as right + # now it makes the top-level external get added twice + # (once as external, once as 'normal' unchanged item) + # because of the way SVN presents external items + continue + # keep trying + raise ValueError("could not parse line %r" % line) + else: + rev, modrev, author, fn = m.groups() + wcpath = rootwcpath.join(fn, abs=1) + #assert wcpath.check() + if c0 == 'M': + assert wcpath.check(file=1), "didn't expect a directory with changed content here" + rootstatus.modified.append(wcpath) + elif c0 == 'A' or c3 == '+' : + rootstatus.added.append(wcpath) + elif c0 == 'D': + rootstatus.deleted.append(wcpath) + elif c0 == 'C': + rootstatus.conflict.append(wcpath) + elif c0 == '~': + rootstatus.kindmismatch.append(wcpath) + elif c0 == '!': + rootstatus.incomplete.append(wcpath) + elif c0 == 'R': + rootstatus.replaced.append(wcpath) + elif not c0.strip(): + rootstatus.unchanged.append(wcpath) + else: + raise NotImplementedError("received flag %r" % c0) + + if c1 == 'M': + rootstatus.prop_modified.append(wcpath) + # XXX do we cover all client versions here? + if c2 == 'L' or c5 == 'K': + rootstatus.locked.append(wcpath) + if c7 == '*': + rootstatus.update_available.append(wcpath) + + if wcpath == rootwcpath: + rootstatus.rev = rev + rootstatus.modrev = modrev + rootstatus.author = author + if update_rev: + rootstatus.update_rev = update_rev + continue + return rootstatus + fromstring = staticmethod(fromstring) + +class XMLWCStatus(WCStatus): + def fromstring(data, rootwcpath, rev=None, modrev=None, author=None): + """ parse 'data' (XML string as outputted by svn st) into a status obj + """ + # XXX for externals, the path is shown twice: once + # with external information, and once with full info as if + # the item was a normal non-external... 
the current way of + # dealing with this issue is by ignoring it - this does make + # externals appear as external items as well as 'normal', + # unchanged ones in the status object so this is far from ideal + rootstatus = WCStatus(rootwcpath, rev, modrev, author) + update_rev = None + minidom, ExpatError = importxml() + try: + doc = minidom.parseString(data) + except ExpatError: + e = sys.exc_info()[1] + raise ValueError(str(e)) + urevels = doc.getElementsByTagName('against') + if urevels: + rootstatus.update_rev = urevels[-1].getAttribute('revision') + for entryel in doc.getElementsByTagName('entry'): + path = entryel.getAttribute('path') + statusel = entryel.getElementsByTagName('wc-status')[0] + itemstatus = statusel.getAttribute('item') + + if itemstatus == 'unversioned': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.unknown.append(wcpath) + continue + elif itemstatus == 'external': + wcpath = rootwcpath.__class__( + rootwcpath.localpath.join(path, abs=1), + auth=rootwcpath.auth) + rootstatus.external.append(wcpath) + continue + elif itemstatus == 'ignored': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.ignored.append(wcpath) + continue + elif itemstatus == 'incomplete': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.incomplete.append(wcpath) + continue + + rev = statusel.getAttribute('revision') + if itemstatus == 'added' or itemstatus == 'none': + rev = '0' + modrev = '?' + author = '?' + date = '' + else: + #print entryel.toxml() + commitel = entryel.getElementsByTagName('commit')[0] + if commitel: + modrev = commitel.getAttribute('revision') + author = '' + author_els = commitel.getElementsByTagName('author') + if author_els: + for c in author_els[0].childNodes: + author += c.nodeValue + date = '' + for c in commitel.getElementsByTagName('date')[0]\ + .childNodes: + date += c.nodeValue + + wcpath = rootwcpath.join(path, abs=1) + + assert itemstatus != 'modified' or wcpath.check(file=1), ( + 'did\'t expect a directory with changed content here') + + itemattrname = { + 'normal': 'unchanged', + 'unversioned': 'unknown', + 'conflicted': 'conflict', + 'none': 'added', + }.get(itemstatus, itemstatus) + + attr = getattr(rootstatus, itemattrname) + attr.append(wcpath) + + propsstatus = statusel.getAttribute('props') + if propsstatus not in ('none', 'normal'): + rootstatus.prop_modified.append(wcpath) + + if wcpath == rootwcpath: + rootstatus.rev = rev + rootstatus.modrev = modrev + rootstatus.author = author + rootstatus.date = date + + # handle repos-status element (remote info) + rstatusels = entryel.getElementsByTagName('repos-status') + if rstatusels: + rstatusel = rstatusels[0] + ritemstatus = rstatusel.getAttribute('item') + if ritemstatus in ('added', 'modified'): + rootstatus.update_available.append(wcpath) + + lockels = entryel.getElementsByTagName('lock') + if len(lockels): + rootstatus.locked.append(wcpath) + + return rootstatus + fromstring = staticmethod(fromstring) + +class InfoSvnWCCommand: + def __init__(self, output): + # Path: test + # URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test + # Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada + # Revision: 2151 + # Node Kind: directory + # Schedule: normal + # Last Changed Author: hpk + # Last Changed Rev: 2100 + # Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003) + # Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003) + + d = {} + for line in output.split('\n'): + if not line.strip(): + continue + key, value = line.split(':', 1) + key = 
key.lower().replace(' ', '') + value = value.strip() + d[key] = value + try: + self.url = d['url'] + except KeyError: + raise ValueError("Not a versioned resource") + #raise ValueError, "Not a versioned resource %r" % path + self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind'] + self.rev = int(d['revision']) + self.path = py.path.local(d['path']) + self.size = self.path.size() + if 'lastchangedrev' in d: + self.created_rev = int(d['lastchangedrev']) + if 'lastchangedauthor' in d: + self.last_author = d['lastchangedauthor'] + if 'lastchangeddate' in d: + self.mtime = parse_wcinfotime(d['lastchangeddate']) + self.time = self.mtime * 1000000 + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + +def parse_wcinfotime(timestr): + """ Returns seconds since epoch, UTC. """ + # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003) + m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr) + if not m: + raise ValueError("timestring %r does not match" % timestr) + timestr, timezone = m.groups() + # do not handle timezone specially, return value should be UTC + parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S") + return calendar.timegm(parsedtime) + +def make_recursive_propdict(wcroot, + output, + rex = re.compile("Properties on '(.*)':")): + """ Return a dictionary of path->PropListDict mappings. """ + lines = [x for x in output.split('\n') if x] + pdict = {} + while lines: + line = lines.pop(0) + m = rex.match(line) + if not m: + raise ValueError("could not parse propget-line: %r" % line) + path = m.groups()[0] + wcpath = wcroot.join(path, abs=1) + propnames = [] + while lines and lines[0].startswith(' '): + propname = lines.pop(0).strip() + propnames.append(propname) + assert propnames, "must have found properties!" + pdict[wcpath] = PropListDict(wcpath, propnames) + return pdict + + +def importxml(cache=[]): + if cache: + return cache + from xml.dom import minidom + from xml.parsers.expat import ExpatError + cache.extend([minidom, ExpatError]) + return cache + +class LogEntry: + def __init__(self, logentry): + self.rev = int(logentry.getAttribute('revision')) + for lpart in filter(None, logentry.childNodes): + if lpart.nodeType == lpart.ELEMENT_NODE: + if lpart.nodeName == 'author': + self.author = lpart.firstChild.nodeValue + elif lpart.nodeName == 'msg': + if lpart.firstChild: + self.msg = lpart.firstChild.nodeValue + else: + self.msg = '' + elif lpart.nodeName == 'date': + #2003-07-29T20:05:11.598637Z + timestr = lpart.firstChild.nodeValue + self.date = parse_apr_time(timestr) + elif lpart.nodeName == 'paths': + self.strpaths = [] + for ppart in filter(None, lpart.childNodes): + if ppart.nodeType == ppart.ELEMENT_NODE: + self.strpaths.append(PathEntry(ppart)) + def __repr__(self): + return '' % ( + self.rev, self.author, self.date) + + --- /dev/null +++ b/_py/code/assertion.py @@ -0,0 +1,75 @@ +import sys +import py + +BuiltinAssertionError = py.builtin.builtins.AssertionError + + +def _format_explanation(explanation): + # uck! 
See CallFunc for where \n{ and \n} escape sequences are used + raw_lines = (explanation or '').split('\n') + # escape newlines not followed by { and } + lines = [raw_lines[0]] + for l in raw_lines[1:]: + if l.startswith('{') or l.startswith('}'): + lines.append(l) + else: + lines[-1] += '\\n' + l + + result = lines[:1] + stack = [0] + stackcnt = [0] + for line in lines[1:]: + if line.startswith('{'): + if stackcnt[-1]: + s = 'and ' + else: + s = 'where ' + stack.append(len(result)) + stackcnt[-1] += 1 + stackcnt.append(0) + result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + else: + assert line.startswith('}') + stack.pop() + stackcnt.pop() + result[stack[-1]] += line[1:] + assert len(stack) == 1 + return '\n'.join(result) + + +if sys.version_info >= (2, 6): + from _py.code._assertionnew import interpret +else: + from _py.code._assertionold import interpret + + +class AssertionError(BuiltinAssertionError): + + def __init__(self, *args): + BuiltinAssertionError.__init__(self, *args) + if args: + try: + self.msg = str(args[0]) + except (KeyboardInterrupt, SystemExit): + raise + except: + self.msg = "<[broken __repr__] %s at %0xd>" %( + args[0].__class__, id(args[0])) + else: + f = py.code.Frame(sys._getframe(1)) + try: + source = f.statement + source = str(source.deindent()).strip() + except py.error.ENOENT: + source = None + # this can also occur during reinterpretation, when the + # co_filename is set to "". + if source: + self.msg = interpret(source, f, should_fail=True) + if not self.args: + self.args = (self.msg,) + else: + self.msg = None + +if sys.version_info > (3, 0): + AssertionError.__module__ = "builtins" --- /dev/null +++ b/_py/log/warning.py @@ -0,0 +1,70 @@ +import py, sys + +class Warning(DeprecationWarning): + def __init__(self, msg, path, lineno): + self.msg = msg + self.path = path + self.lineno = lineno + def __repr__(self): + return "%s:%d: %s" %(self.path, self.lineno+1, self.msg) + def __str__(self): + return self.msg + +def _apiwarn(startversion, msg, stacklevel=2, function=None): + # below is mostly COPIED from python2.4/warnings.py's def warn() + # Get context information + if stacklevel == "initpkg": + frame = sys._getframe(stacklevel == "initpkg" and 1 or stacklevel) + level = 2 + while frame: + co = frame.f_code + if co.co_name == "__getattr__" and co.co_filename.find("initpkg") !=-1: + stacklevel = level + break + level += 1 + frame = frame.f_back + else: + stacklevel = 1 + msg = "%s (since version %s)" %(msg, startversion) + warn(msg, stacklevel=stacklevel+1, function=function) + +def warn(msg, stacklevel=1, function=None): + if function is not None: + filename = py.std.inspect.getfile(function) + lineno = py.code.getrawcode(function).co_firstlineno + else: + try: + caller = sys._getframe(stacklevel) + except ValueError: + globals = sys.__dict__ + lineno = 1 + else: + globals = caller.f_globals + lineno = caller.f_lineno + if '__name__' in globals: + module = globals['__name__'] + else: + module = "" + filename = globals.get('__file__') + if filename: + fnl = filename.lower() + if fnl.endswith(".pyc") or fnl.endswith(".pyo"): + filename = filename[:-1] + else: + if module == "__main__": + try: + filename = sys.argv[0] + except AttributeError: + # embedded interpreters don't have sys.argv, see bug #839151 + filename = '__main__' + if not filename: + filename = module + path = py.path.local(filename) + warning = Warning(msg, path, lineno) + py.std.warnings.warn_explicit(warning, category=Warning, + filename=str(warning.path), + lineno=warning.lineno, + 
registry=py.std.warnings.__dict__.setdefault( + "__warningsregistry__", {}) + ) + --- /dev/null +++ b/_py/path/gateway/__init__.py @@ -0,0 +1,1 @@ +# --- /dev/null +++ b/_py/io/__init__.py @@ -0,0 +1,1 @@ +""" input/output helping """ --- /dev/null +++ b/_py/path/common.py @@ -0,0 +1,329 @@ +""" +""" +import os, sys +import py + +class Checkers: + _depend_on_existence = 'exists', 'link', 'dir', 'file' + + def __init__(self, path): + self.path = path + + def dir(self): + raise NotImplementedError + + def file(self): + raise NotImplementedError + + def dotfile(self): + return self.path.basename.startswith('.') + + def ext(self, arg): + if not arg.startswith('.'): + arg = '.' + arg + return self.path.ext == arg + + def exists(self): + raise NotImplementedError + + def basename(self, arg): + return self.path.basename == arg + + def basestarts(self, arg): + return self.path.basename.startswith(arg) + + def relto(self, arg): + return self.path.relto(arg) + + def fnmatch(self, arg): + return FNMatcher(arg)(self.path) + + def endswith(self, arg): + return str(self.path).endswith(arg) + + def _evaluate(self, kw): + for name, value in kw.items(): + invert = False + meth = None + try: + meth = getattr(self, name) + except AttributeError: + if name[:3] == 'not': + invert = True + try: + meth = getattr(self, name[3:]) + except AttributeError: + pass + if meth is None: + raise TypeError( + "no %r checker available for %r" % (name, self.path)) + try: + if py.code.getrawcode(meth).co_argcount > 1: + if (not meth(value)) ^ invert: + return False + else: + if bool(value) ^ bool(meth()) ^ invert: + return False + except (py.error.ENOENT, py.error.ENOTDIR): + for name in self._depend_on_existence: + if name in kw: + if kw.get(name): + return False + name = 'not' + name + if name in kw: + if not kw.get(name): + return False + return True + +class NeverRaised(Exception): + pass + +class PathBase(object): + """ shared implementation for filesystem path objects.""" + Checkers = Checkers + + def __div__(self, other): + return self.join(str(other)) + __truediv__ = __div__ # py3k + + def basename(self): + """ basename part of path. """ + return self._getbyspec('basename')[0] + basename = property(basename, None, None, basename.__doc__) + + def purebasename(self): + """ pure base name of the path.""" + return self._getbyspec('purebasename')[0] + purebasename = property(purebasename, None, None, purebasename.__doc__) + + def ext(self): + """ extension of the path (including the '.').""" + return self._getbyspec('ext')[0] + ext = property(ext, None, None, ext.__doc__) + + def dirpath(self, *args, **kwargs): + """ return the directory Path of the current Path joined + with any given path arguments. + """ + return self.new(basename='').join(*args, **kwargs) + + def read(self, mode='r'): + """ read and return a bytestring from reading the path. """ + if sys.version_info < (2,3): + for x in 'u', 'U': + if x in mode: + mode = mode.replace(x, '') + f = self.open(mode) + try: + return f.read() + finally: + f.close() + + def readlines(self, cr=1): + """ read and return a list of lines from the path. if cr is False, the +newline will be removed from the end of each line. 
""" + if not cr: + content = self.read('rU') + return content.split('\n') + else: + f = self.open('rU') + try: + return f.readlines() + finally: + f.close() + + def load(self): + """ (deprecated) return object unpickled from self.read() """ + f = self.open('rb') + try: + return py.error.checked_call(py.std.pickle.load, f) + finally: + f.close() + + def move(self, target): + """ move this path to target. """ + if target.relto(self): + raise py.error.EINVAL(target, + "cannot move path into a subdirectory of itself") + try: + self.rename(target) + except py.error.EXDEV: # invalid cross-device link + self.copy(target) + self.remove() + + def __repr__(self): + """ return a string representation of this path. """ + return repr(str(self)) + + def check(self, **kw): + """ check a path for existence, or query its properties + + without arguments, this returns True if the path exists (on the + filesystem), False if not + + with (keyword only) arguments, the object compares the value + of the argument with the value of a property with the same name + (if it has one, else it raises a TypeError) + + when for example the keyword argument 'ext' is '.py', this will + return True if self.ext == '.py', False otherwise + """ + if not kw: + kw = {'exists' : 1} + return self.Checkers(self)._evaluate(kw) + + def relto(self, relpath): + """ return a string which is the relative part of the path + to the given 'relpath'. + """ + if not isinstance(relpath, (str, PathBase)): + raise TypeError("%r: not a string or path object" %(relpath,)) + strrelpath = str(relpath) + if strrelpath and strrelpath[-1] != self.sep: + strrelpath += self.sep + #assert strrelpath[-1] == self.sep + #assert strrelpath[-2] != self.sep + strself = str(self) + if sys.platform == "win32": + if os.path.normcase(strself).startswith( + os.path.normcase(strrelpath)): + return strself[len(strrelpath):] + elif strself.startswith(strrelpath): + return strself[len(strrelpath):] + return "" + + def bestrelpath(self, dest): + """ return a string which is a relative path from self + to dest such that self.join(bestrelpath) == dest and + if not such path can be determined return dest. + """ + try: + base = self.common(dest) + if not base: # can be the case on windows + return str(dest) + self2base = self.relto(base) + reldest = dest.relto(base) + if self2base: + n = self2base.count(self.sep) + 1 + else: + n = 0 + l = ['..'] * n + if reldest: + l.append(reldest) + target = dest.sep.join(l) + return target + except AttributeError: + return str(dest) + + + def parts(self, reverse=False): + """ return a root-first list of all ancestor directories + plus the path itself. + """ + current = self + l = [self] + while 1: + last = current + current = current.dirpath() + if last == current: + break + l.insert(0, current) + if reverse: + l.reverse() + return l + + def common(self, other): + """ return the common part shared with the other path + or None if there is no common part. + """ + last = None + for x, y in zip(self.parts(), other.parts()): + if x != y: + return last + last = x + return last + + def __add__(self, other): + """ return new path object with 'other' added to the basename""" + return self.new(basename=self.basename+str(other)) + + def __cmp__(self, other): + """ return sort value (-1, 0, +1). 
""" + try: + return cmp(self.strpath, other.strpath) + except AttributeError: + return cmp(str(self), str(other)) # self.path, other.path) + + def __lt__(self, other): + try: + return self.strpath < other.strpath + except AttributeError: + return str(self) < str(other) + + def visit(self, fil=None, rec=None, ignore=NeverRaised): + """ yields all paths below the current one + + fil is a filter (glob pattern or callable), if not matching the + path will not be yielded, defaulting to None (everything is + returned) + + rec is a filter (glob pattern or callable) that controls whether + a node is descended, defaulting to None + + ignore is an Exception class that is ignoredwhen calling dirlist() + on any of the paths (by default, all exceptions are reported) + """ + if isinstance(fil, str): + fil = FNMatcher(fil) + if rec: + if isinstance(rec, str): + rec = fnmatch(fil) + elif not hasattr(rec, '__call__'): + rec = None + try: + entries = self.listdir() + except ignore: + return + dirs = [p for p in entries + if p.check(dir=1) and (rec is None or rec(p))] + for subdir in dirs: + for p in subdir.visit(fil=fil, rec=rec, ignore=ignore): + yield p + for p in entries: + if fil is None or fil(p): + yield p + + def _sortlist(self, res, sort): + if sort: + if hasattr(sort, '__call__'): + res.sort(sort) + else: + res.sort() + +class FNMatcher: + def __init__(self, pattern): + self.pattern = pattern + def __call__(self, path): + """return true if the basename/fullname matches the glob-'pattern'. + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + if the pattern contains a path-separator then the full path + is used for pattern matching and a '*' is prepended to the + pattern. + + if the pattern doesn't contain a path-separator the pattern + is only matched against the basename. + """ + pattern = self.pattern + if pattern.find(path.sep) == -1: + name = path.basename + else: + name = str(path) # path.strpath # XXX svn? + pattern = '*' + path.sep + pattern + from fnmatch import fnmatch + return fnmatch(name, pattern) + --- /dev/null +++ b/_py/builtin/__init__.py @@ -0,0 +1,2 @@ +""" backports and additions of builtins """ + --- /dev/null +++ b/_py/compat/dep_optparse.py @@ -0,0 +1,4 @@ +import py +py.log._apiwarn("1.1", "py.compat.optparse deprecated, use standard library version.", stacklevel="initpkg") + +optparse = py.std.optparse --- /dev/null +++ b/_py/cmdline/pycountloc.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python + +# hands on script to compute the non-empty Lines of Code +# for tests and non-test code + +"""\ +py.countloc [PATHS] + +Count (non-empty) lines of python code and number of python files recursively +starting from a list of paths given on the command line (starting from the +current working directory). Distinguish between test files and normal ones and +report them separately. 
+""" +import py + +def main(): + parser = py.std.optparse.OptionParser(usage=__doc__) + (options, args) = parser.parse_args() + countloc(args) + +def nodot(p): + return p.check(dotfile=0) + +class FileCounter(object): + def __init__(self): + self.file2numlines = {} + self.numlines = 0 + self.numfiles = 0 + + def addrecursive(self, directory, fil="*.py", rec=nodot): + for x in directory.visit(fil, rec): + self.addfile(x) + + def addfile(self, fn, emptylines=False): + if emptylines: + s = len(p.readlines()) + else: + s = 0 + for i in fn.readlines(): + if i.strip(): + s += 1 + self.file2numlines[fn] = s + self.numfiles += 1 + self.numlines += s + + def getnumlines(self, fil): + numlines = 0 + for path, value in self.file2numlines.items(): + if fil(path): + numlines += value + return numlines + + def getnumfiles(self, fil): + numfiles = 0 + for path in self.file2numlines: + if fil(path): + numfiles += 1 + return numfiles + +def get_loccount(locations=None): + if locations is None: + localtions = [py.path.local()] + counter = FileCounter() + for loc in locations: + counter.addrecursive(loc, '*.py', rec=nodot) + + def istestfile(p): + return p.check(fnmatch='test_*.py') + isnottestfile = lambda x: not istestfile(x) + + numfiles = counter.getnumfiles(isnottestfile) + numlines = counter.getnumlines(isnottestfile) + numtestfiles = counter.getnumfiles(istestfile) + numtestlines = counter.getnumlines(istestfile) + + return counter, numfiles, numlines, numtestfiles, numtestlines + +def countloc(paths=None): + if not paths: + paths = ['.'] + locations = [py.path.local(x) for x in paths] + (counter, numfiles, numlines, numtestfiles, + numtestlines) = get_loccount(locations) + + items = counter.file2numlines.items() + items.sort(lambda x,y: cmp(x[1], y[1])) + for x, y in items: + print("%3d %30s" % (y,x)) + + print("%30s %3d" %("number of testfiles", numtestfiles)) + print("%30s %3d" %("number of non-empty testlines", numtestlines)) + print("%30s %3d" %("number of files", numfiles)) + print("%30s %3d" %("number of non-empty lines", numlines)) + --- /dev/null +++ b/_py/code/oldmagic.py @@ -0,0 +1,62 @@ +""" deprecated module for turning on/off some features. """ + +import py + +from py.builtin import builtins as cpy_builtin + +def invoke(assertion=False, compile=False): + """ (deprecated) invoke magic, currently you can specify: + + assertion patches the builtin AssertionError to try to give + more meaningful AssertionErrors, which by means + of deploying a mini-interpreter constructs + a useful error message. + """ + py.log._apiwarn("1.1", + "py.magic.invoke() is deprecated, use py.code.patch_builtins()", + stacklevel=2, + ) + py.code.patch_builtins(assertion=assertion, compile=compile) + +def revoke(assertion=False, compile=False): + """ (deprecated) revoke previously invoked magic (see invoke()).""" + py.log._apiwarn("1.1", + "py.magic.revoke() is deprecated, use py.code.unpatch_builtins()", + stacklevel=2, + ) + py.code.unpatch_builtins(assertion=assertion, compile=compile) + +patched = {} + +def patch(namespace, name, value): + """ (deprecated) rebind the 'name' on the 'namespace' to the 'value', + possibly and remember the original value. Multiple + invocations to the same namespace/name pair will + remember a list of old values. 
+ """ + py.log._apiwarn("1.1", + "py.magic.patch() is deprecated, in tests use monkeypatch funcarg.", + stacklevel=2, + ) + nref = (namespace, name) + orig = getattr(namespace, name) + patched.setdefault(nref, []).append(orig) + setattr(namespace, name, value) + return orig + +def revert(namespace, name): + """ (deprecated) revert to the orginal value the last patch modified. + Raise ValueError if no such original value exists. + """ + py.log._apiwarn("1.1", + "py.magic.revert() is deprecated, in tests use monkeypatch funcarg.", + stacklevel=2, + ) + nref = (namespace, name) + if nref not in patched or not patched[nref]: + raise ValueError("No original value stored for %s.%s" % nref) + current = getattr(namespace, name) + orig = patched[nref].pop() + setattr(namespace, name, orig) + return current + --- /dev/null +++ b/_py/code/__init__.py @@ -0,0 +1,1 @@ +""" python inspection/code generation API """ --- /dev/null +++ b/_py/cmdline/pyconvert_unittest.py @@ -0,0 +1,249 @@ +import re +import sys +import parser + +d={} +# d is the dictionary of unittest changes, keyed to the old name +# used by unittest. +# d[old][0] is the new replacement function. +# d[old][1] is the operator you will substitute, or '' if there is none. +# d[old][2] is the possible number of arguments to the unittest +# function. + +# Old Unittest Name new name operator # of args +d['assertRaises'] = ('raises', '', ['Any']) +d['fail'] = ('raise AssertionError', '', [0,1]) +d['assert_'] = ('assert', '', [1,2]) +d['failIf'] = ('assert not', '', [1,2]) +d['assertEqual'] = ('assert', ' ==', [2,3]) +d['failIfEqual'] = ('assert not', ' ==', [2,3]) +d['assertIn'] = ('assert', ' in', [2,3]) +d['assertNotIn'] = ('assert', ' not in', [2,3]) +d['assertNotEqual'] = ('assert', ' !=', [2,3]) +d['failUnlessEqual'] = ('assert', ' ==', [2,3]) +d['assertAlmostEqual'] = ('assert round', ' ==', [2,3,4]) +d['failIfAlmostEqual'] = ('assert not round', ' ==', [2,3,4]) +d['assertNotAlmostEqual'] = ('assert round', ' !=', [2,3,4]) +d['failUnlessAlmostEquals'] = ('assert round', ' ==', [2,3,4]) + +# the list of synonyms +d['failUnlessRaises'] = d['assertRaises'] +d['failUnless'] = d['assert_'] +d['assertEquals'] = d['assertEqual'] +d['assertNotEquals'] = d['assertNotEqual'] +d['assertAlmostEquals'] = d['assertAlmostEqual'] +d['assertNotAlmostEquals'] = d['assertNotAlmostEqual'] + +# set up the regular expressions we will need +leading_spaces = re.compile(r'^(\s*)') # this never fails + +pat = '' +for k in d.keys(): # this complicated pattern to match all unittests + pat += '|' + r'^(\s*)' + 'self.' + k + r'\(' # \tself.whatever( + +old_names = re.compile(pat[1:]) +linesep='\n' # nobody will really try to convert files not read + # in text mode, will they? + + +def blocksplitter(fp): + '''split a file into blocks that are headed by functions to rename''' + + blocklist = [] + blockstring = '' + + for line in fp: + interesting = old_names.match(line) + if interesting : + if blockstring: + blocklist.append(blockstring) + blockstring = line # reset the block + else: + blockstring += line + + blocklist.append(blockstring) + return blocklist + +def rewrite_utest(block): + '''rewrite every block to use the new utest functions''' + + '''returns the rewritten unittest, unless it ran into problems, + in which case it just returns the block unchanged. 
+ ''' + utest = old_names.match(block) + + if not utest: + return block + + old = utest.group(0).lstrip()[5:-1] # the name we want to replace + new = d[old][0] # the name of the replacement function + op = d[old][1] # the operator you will use , or '' if there is none. + possible_args = d[old][2] # a list of the number of arguments the + # unittest function could possibly take. + + if possible_args == ['Any']: # just rename assertRaises & friends + return re.sub('self.'+old, new, block) + + message_pos = possible_args[-1] + # the remaining unittests can have an optional message to print + # when they fail. It is always the last argument to the function. + + try: + indent, argl, trailer = decompose_unittest(old, block) + + except SyntaxError: # but we couldn't parse it! + return block + + argnum = len(argl) + if argnum not in possible_args: + # sanity check - this one isn't real either + return block + + elif argnum == message_pos: + message = argl[-1] + argl = argl[:-1] + else: + message = None + + if argnum is 0 or (argnum is 1 and argnum is message_pos): #unittest fail() + string = '' + if message: + message = ' ' + message + + elif message_pos is 4: # assertAlmostEqual & friends + try: + pos = argl[2].lstrip() + except IndexError: + pos = '7' # default if none is specified + string = '(%s -%s, %s)%s 0' % (argl[0], argl[1], pos, op ) + + else: # assert_, assertEquals and all the rest + string = ' ' + op.join(argl) + + if message: + string = string + ',' + message + + return indent + new + string + trailer + +def decompose_unittest(old, block): + '''decompose the block into its component parts''' + + ''' returns indent, arglist, trailer + indent -- the indentation + arglist -- the arguments to the unittest function + trailer -- any extra junk after the closing paren, such as #commment + ''' + + indent = re.match(r'(\s*)', block).group() + pat = re.search('self.' + old + r'\(', block) + + args, trailer = get_expr(block[pat.end():], ')') + arglist = break_args(args, []) + + if arglist == ['']: # there weren't any + return indent, [], trailer + + for i in range(len(arglist)): + try: + parser.expr(arglist[i].lstrip('\t ')) + except SyntaxError: + if i == 0: + arglist[i] = '(' + arglist[i] + ')' + else: + arglist[i] = ' (' + arglist[i] + ')' + + return indent, arglist, trailer + +def break_args(args, arglist): + '''recursively break a string into a list of arguments''' + try: + first, rest = get_expr(args, ',') + if not rest: + return arglist + [first] + else: + return [first] + break_args(rest, arglist) + except SyntaxError: + return arglist + [args] + +def get_expr(s, char): + '''split a string into an expression, and the rest of the string''' + + pos=[] + for i in range(len(s)): + if s[i] == char: + pos.append(i) + if pos == []: + raise SyntaxError # we didn't find the expected char. Ick. + + for p in pos: + # make the python parser do the hard work of deciding which comma + # splits the string into two expressions + try: + parser.expr('(' + s[:p] + ')') + return s[:p], s[p+1:] + except SyntaxError: # It's not an expression yet + pass + raise SyntaxError # We never found anything that worked. + + +def main(): + import sys + import py + + usage = "usage: %prog [-s [filename ...] | [-i | -c filename ...]]" + optparser = py.std.optparse.OptionParser(usage) + + def select_output (option, opt, value, optparser, **kw): + if hasattr(optparser, 'output'): + optparser.error( + 'Cannot combine -s -i and -c options. 
Use one only.') + else: + optparser.output = kw['output'] + + optparser.add_option("-s", "--stdout", action="callback", + callback=select_output, + callback_kwargs={'output':'stdout'}, + help="send your output to stdout") + + optparser.add_option("-i", "--inplace", action="callback", + callback=select_output, + callback_kwargs={'output':'inplace'}, + help="overwrite files in place") + + optparser.add_option("-c", "--copy", action="callback", + callback=select_output, + callback_kwargs={'output':'copy'}, + help="copy files ... fn.py --> fn_cp.py") + + options, args = optparser.parse_args() + + output = getattr(optparser, 'output', 'stdout') + + if output in ['inplace', 'copy'] and not args: + optparser.error( + '-i and -c option require at least one filename') + + if not args: + s = '' + for block in blocksplitter(sys.stdin): + s += rewrite_utest(block) + sys.stdout.write(s) + + else: + for infilename in args: # no error checking to see if we can open, etc. + infile = file(infilename) + s = '' + for block in blocksplitter(infile): + s += rewrite_utest(block) + if output == 'inplace': + outfile = file(infilename, 'w+') + elif output == 'copy': # yes, just go clobber any existing .cp + outfile = file (infilename[:-3]+ '_cp.py', 'w+') + else: + outfile = sys.stdout + + outfile.write(s) + + +if __name__ == '__main__': + main() --- /dev/null +++ b/_py/code/_assertionnew.py @@ -0,0 +1,314 @@ +""" +Like _assertion.py but using builtin AST. It should replace _assertion.py +eventually. +""" + +import sys +import ast + +import py +from _py.code.assertion import _format_explanation, BuiltinAssertionError + + +class Failure(Exception): + """Error found while interpreting AST.""" + + def __init__(self, explanation=""): + self.cause = sys.exc_info() + self.explanation = explanation + + +def interpret(source, frame, should_fail=False): + mod = ast.parse(source) + visitor = DebugInterpreter(frame) + try: + visitor.visit(mod) + except Failure: + failure = sys.exc_info()[1] + return getfailure(failure) + if should_fail: + return ("(assertion failed, but when it was re-run for " + "printing intermediate values, it did not fail. 
Suggestions: " + "compute assert expression before the assert or use --nomagic)") + +def run(offending_line, frame=None): + if frame is None: + frame = py.code.Frame(sys._getframe(1)) + return interpret(offending_line, frame) + +def getfailure(failure): + explanation = _format_explanation(failure.explanation) + value = failure.cause[1] + if str(value): + lines = explanation.splitlines() + if not lines: + lines.append("") + lines[0] += " << {0}".format(value) + explanation = "\n".join(lines) + text = "{0}: {1}".format(failure.cause[0].__name__, explanation) + if text.startswith("AssertionError: assert "): + text = text[16:] + return text + + +operator_map = { + ast.BitOr : "|", + ast.BitXor : "^", + ast.BitAnd : "&", + ast.LShift : "<<", + ast.RShift : ">>", + ast.Add : "+", + ast.Sub : "-", + ast.Mult : "*", + ast.Div : "/", + ast.FloorDiv : "//", + ast.Mod : "%", + ast.Eq : "==", + ast.NotEq : "!=", + ast.Lt : "<", + ast.LtE : "<=", + ast.Gt : ">", + ast.GtE : ">=", + ast.Is : "is", + ast.IsNot : "is not", + ast.In : "in", + ast.NotIn : "not in" +} + +unary_map = { + ast.Not : "not {0}", + ast.Invert : "~{0}", + ast.USub : "-{0}", + ast.UAdd : "+{0}" +} + + +class DebugInterpreter(ast.NodeVisitor): + """Interpret AST nodes to gleam useful debugging information.""" + + def __init__(self, frame): + self.frame = frame + + def generic_visit(self, node): + # Fallback when we don't have a special implementation. + if isinstance(node, ast.expr): + mod = ast.Expression(node) + co = self._compile(mod) + try: + result = self.frame.eval(co) + except Exception: + raise Failure() + explanation = self.frame.repr(result) + return explanation, result + elif isinstance(node, ast.stmt): + mod = ast.Module([node]) + co = self._compile(mod, "exec") + try: + self.frame.exec_(co) + except Exception: + raise Failure() + return None, None + else: + raise AssertionError("can't handle {0}".format(node)) + + def _compile(self, source, mode="eval"): + return compile(source, "", mode) + + def visit_Expr(self, expr): + return self.visit(expr.value) + + def visit_Module(self, mod): + for stmt in mod.body: + self.visit(stmt) + + def visit_Name(self, name): + explanation, result = self.generic_visit(name) + # See if the name is local. 
+ source = "{0!r} in locals() is not globals()".format(name.id) + co = self._compile(source) + try: + local = self.frame.eval(co) + except Exception: + # have to assume it isn't + local = False + if not local: + return name.id, result + return explanation, result + + def visit_Compare(self, comp): + left = comp.left + left_explanation, left_result = self.visit(left) + got_result = False + for op, next_op in zip(comp.ops, comp.comparators): + if got_result and not result: + break + next_explanation, next_result = self.visit(next_op) + op_symbol = operator_map[op.__class__] + explanation = "{0} {1} {2}".format(left_explanation, op_symbol, + next_explanation) + source = "__exprinfo_left {0} __exprinfo_right".format(op_symbol) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_left=left_result, + __exprinfo_right=next_result) + except Exception: + raise Failure(explanation) + else: + got_result = True + left_explanation, left_result = next_explanation, next_result + return explanation, result + + def visit_BoolOp(self, boolop): + is_or = isinstance(boolop.op, ast.Or) + explanations = [] + for operand in boolop.values: + explanation, result = self.visit(operand) + explanations.append(explanation) + if result == is_or: + break + name = is_or and " or " or " and " + explanation = "(" + name.join(explanations) + ")" + return explanation, result + + def visit_UnaryOp(self, unary): + pattern = unary_map[unary.op.__class__] + operand_explanation, operand_result = self.visit(unary.operand) + explanation = pattern.format(operand_explanation) + co = self._compile(pattern.format("__exprinfo_expr")) + try: + result = self.frame.eval(co, __exprinfo_expr=operand_result) + except Exception: + raise Failure(explanation) + return explanation, result + + def visit_BinOp(self, binop): + left_explanation, left_result = self.visit(binop.left) + right_explanation, right_result = self.visit(binop.right) + symbol = operator_map[binop.op.__class__] + explanation = "({0} {1} {2})".format(left_explanation, symbol, + right_explanation) + source = "__exprinfo_left {0} __exprinfo_right".format(symbol) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_left=left_result, + __exprinfo_right=right_result) + except Exception: + raise Failure(explanation) + return explanation, result + + def visit_Call(self, call): + func_explanation, func = self.visit(call.func) + arg_explanations = [] + ns = {"__exprinfo_func" : func} + arguments = [] + for arg in call.args: + arg_explanation, arg_result = self.visit(arg) + arg_name = "__exprinfo_{0}".format(len(ns)) + ns[arg_name] = arg_result + arguments.append(arg_name) + arg_explanations.append(arg_explanation) + for keyword in call.keywords: + arg_explanation, arg_result = self.visit(keyword.value) + arg_name = "__exprinfo_{0}".format(len(ns)) + ns[arg_name] = arg_result + keyword_source = "{0}={{0}}".format(keyword.id) + arguments.append(keyword_source.format(arg_name)) + arg_explanations.append(keyword_source.format(arg_explanation)) + if call.starargs: + arg_explanation, arg_result = self.visit(call.starargs) + arg_name = "__exprinfo_star" + ns[arg_name] = arg_result + arguments.append("*{0}".format(arg_name)) + arg_explanations.append("*{0}".format(arg_explanation)) + if call.kwargs: + arg_explanation, arg_result = self.visit(call.kwargs) + arg_name = "__exprinfo_kwds" + ns[arg_name] = arg_result + arguments.append("**{0}".format(arg_name)) + arg_explanations.append("**{0}".format(arg_explanation)) + args_explained = ", 
".join(arg_explanations) + explanation = "{0}({1})".format(func_explanation, args_explained) + args = ", ".join(arguments) + source = "__exprinfo_func({0})".format(args) + co = self._compile(source) + try: + result = self.frame.eval(co, **ns) + except Exception: + raise Failure(explanation) + # Only show result explanation if it's not a builtin call or returns a + # bool. + if not isinstance(call.func, ast.Name) or \ + not self._is_builtin_name(call.func): + source = "isinstance(__exprinfo_value, bool)" + co = self._compile(source) + try: + is_bool = self.frame.eval(co, __exprinfo_value=result) + except Exception: + is_bool = False + if not is_bool: + pattern = "{0}\n{{{0} = {1}\n}}" + rep = self.frame.repr(result) + explanation = pattern.format(rep, explanation) + return explanation, result + + def _is_builtin_name(self, name): + pattern = "{0!r} not in globals() and {0!r} not in locals()" + source = pattern.format(name.id) + co = self._compile(source) + try: + return self.frame.eval(co) + except Exception: + return False + + def visit_Attribute(self, attr): + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + source_explanation, source_result = self.visit(attr.value) + explanation = "{0}.{1}".format(source_explanation, attr.attr) + source = "__exprinfo_expr.{0}".format(attr.attr) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_expr=source_result) + except Exception: + raise Failure(explanation) + # Check if the attr is from an instance. + source = "{0!r} in getattr(__exprinfo_expr, '__dict__', {{}})" + source = source.format(attr.attr) + co = self._compile(source) + try: + from_instance = self.frame.eval(co, __exprinfo_expr=source_result) + except Exception: + from_instance = True + if from_instance: + rep = self.frame.repr(result) + pattern = "{0}\n{{{0} = {1}\n}}" + explanation = pattern.format(rep, explanation) + return explanation, result + + def visit_Assert(self, assrt): + test_explanation, test_result = self.visit(assrt.test) + if test_explanation.startswith("False\n{False =") and \ + test_explanation.endswith("\n"): + test_explanation = test_explanation[15:-2] + explanation = "assert {0}".format(test_explanation) + if not test_result: + try: + raise BuiltinAssertionError + except Exception: + raise Failure(explanation) + return explanation, test_result + + def visit_Assign(self, assign): + value_explanation, value_result = self.visit(assign.value) + explanation = "... 
= {0}".format(value_explanation) + name = ast.Name("__exprinfo_expr", ast.Load(), assign.value.lineno, + assign.value.col_offset) + new_assign = ast.Assign(assign.targets, name, assign.lineno, + assign.col_offset) + mod = ast.Module([new_assign]) + co = self._compile(mod, "exec") + try: + self.frame.exec_(co, __exprinfo_expr=value_result) + except Exception: + raise Failure(explanation) + return explanation, value_result --- /dev/null +++ b/_py/compat/dep_textwrap.py @@ -0,0 +1,4 @@ +import py + +py.log._apiwarn("1.1", "py.compat.textwrap deprecated, use standard library version.", stacklevel="initpkg") +textwrap = py.std.textwrap --- /dev/null +++ b/_py/builtin/builtin24.py @@ -0,0 +1,71 @@ +try: + reversed = reversed +except NameError: + def reversed(sequence): + """reversed(sequence) -> reverse iterator over values of the sequence + + Return a reverse iterator + """ + if hasattr(sequence, '__reversed__'): + return sequence.__reversed__() + if not hasattr(sequence, '__getitem__'): + raise TypeError("argument to reversed() must be a sequence") + return reversed_iterator(sequence) + + class reversed_iterator(object): + + def __init__(self, seq): + self.seq = seq + self.remaining = len(seq) + + def __iter__(self): + return self + + def next(self): + i = self.remaining + if i > 0: + i -= 1 + item = self.seq[i] + self.remaining = i + return item + raise StopIteration + + def __length_hint__(self): + return self.remaining + +try: + sorted = sorted +except NameError: + builtin_cmp = cmp # need to use cmp as keyword arg + + def sorted(iterable, cmp=None, key=None, reverse=0): + use_cmp = None + if key is not None: + if cmp is None: + def use_cmp(x, y): + return builtin_cmp(x[0], y[0]) + else: + def use_cmp(x, y): + return cmp(x[0], y[0]) + l = [(key(element), element) for element in iterable] + else: + if cmp is not None: + use_cmp = cmp + l = list(iterable) + if use_cmp is not None: + l.sort(use_cmp) + else: + l.sort() + if reverse: + l.reverse() + if key is not None: + return [element for (_, element) in l] + return l + +try: + set, frozenset = set, frozenset +except NameError: + from sets import set, frozenset + +# pass through +enumerate = enumerate --- /dev/null +++ b/_py/process/killproc.py @@ -0,0 +1,23 @@ +import py +import os, sys + +if sys.platform == "win32": + try: + import ctypes + except ImportError: + def dokill(pid): + py.process.cmdexec("taskkill /F /PID %d" %(pid,)) + else: + def dokill(pid): + PROCESS_TERMINATE = 1 + handle = ctypes.windll.kernel32.OpenProcess( + PROCESS_TERMINATE, False, pid) + ctypes.windll.kernel32.TerminateProcess(handle, -1) + ctypes.windll.kernel32.CloseHandle(handle) +else: + def dokill(pid): + os.kill(pid, 15) + +def kill(pid): + """ kill process by id. """ + dokill(pid) --- /dev/null +++ b/_py/process/cmdexec.py @@ -0,0 +1,177 @@ +""" + +module defining basic hook for executing commands +in a - as much as possible - platform independent way. + +Current list: + + exec_cmd(cmd) executes the given command and returns output + or ExecutionFailed exception (if exit status!=0) + +""" + +import os, sys +import py +from subprocess import Popen, PIPE + +#----------------------------------------------------------- +# posix external command execution +#----------------------------------------------------------- +def posix_exec_cmd(cmd): + """ return output of executing 'cmd'. + + raise ExecutionFailed exeception if the command failed. + the exception will provide an 'err' attribute containing + the error-output from the command. 
+ """ + #__tracebackhide__ = True + + import errno + + child = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, close_fds=True) + stdin, stdout, stderr = child.stdin, child.stdout, child.stderr + + # XXX sometimes we get a blocked r.read() call (see below) + # although select told us there is something to read. + # only the next three lines appear to prevent + # the read call from blocking infinitely. + import fcntl + def set_non_block(fd): + flags = fcntl.fcntl(fd, fcntl.F_GETFL) + flags = flags | os.O_NONBLOCK + fcntl.fcntl(fd, fcntl.F_SETFL, flags) + set_non_block(stdout.fileno()) + set_non_block(stderr.fileno()) + #fcntl.fcntl(stdout, fcntl.F_SETFL, os.O_NONBLOCK) + #fcntl.fcntl(stderr, fcntl.F_SETFL, os.O_NONBLOCK) + + import select + out, err = [], [] + while 1: + r_list = [x for x in [stdout, stderr] if x and not x.closed] + if not r_list: + break + try: + r_list = select.select(r_list, [], [])[0] + except (select.error, IOError): + se = sys.exc_info()[1] + if se.args[0] == errno.EINTR: + continue + else: + raise + for r in r_list: + try: + data = r.read() # XXX see XXX above + except IOError: + io = sys.exc_info()[1] + if io.args[0] == errno.EAGAIN: + continue + # Connection Lost + raise + except OSError: + ose = sys.exc_info()[1] + if ose.errno == errno.EPIPE: + # Connection Lost + raise + if ose.errno == errno.EAGAIN: # MacOS-X does this + continue + raise + + if not data: + r.close() + continue + if r is stdout: + out.append(data) + else: + err.append(data) + pid, systemstatus = os.waitpid(child.pid, 0) + if pid != child.pid: + raise ExecutionFailed("child process disappeared during: "+ cmd) + if systemstatus: + if os.WIFSIGNALED(systemstatus): + status = os.WTERMSIG(systemstatus) + 128 + else: + status = os.WEXITSTATUS(systemstatus) + raise ExecutionFailed(status, systemstatus, cmd, + joiner(out), joiner(err)) + return joiner(out) + +def joiner(out): + encoding = sys.getdefaultencoding() + return "".join([py.builtin._totext(x, encoding) for x in out]) + +#----------------------------------------------------------- +# simple win32 external command execution +#----------------------------------------------------------- +def win32_exec_cmd(cmd): + """ return output of executing 'cmd'. + + raise ExecutionFailed exeception if the command failed. + the exception will provide an 'err' attribute containing + the error-output from the command. + + Note that this method can currently deadlock because + we don't have WaitForMultipleObjects in the std-python api. + + Further note that the rules for quoting are very special + under Windows. Do a HELP CMD in a shell, and tell me if + you understand this. For now, I try to do a fix. 
+ """ + #print "*****", cmd + + # the following quoting is only valid for CMD.EXE, not COMMAND.COM + cmd_quoting = True + try: + if os.environ['COMSPEC'].upper().endswith('COMMAND.COM'): + cmd_quoting = False + except KeyError: + pass + if cmd_quoting: + if '"' in cmd and not cmd.startswith('""'): + cmd = '"%s"' % cmd + + return popen3_exec_cmd(cmd) + +def popen3_exec_cmd(cmd): + stdin, stdout, stderr = os.popen3(cmd) + out = stdout.read() + err = stderr.read() + stdout.close() + stderr.close() + status = stdin.close() + if status: + raise ExecutionFailed(status, status, cmd, out, err) + return out + +def pypy_exec_cmd(cmd): + return popen3_exec_cmd(cmd) + +class ExecutionFailed(py.error.Error): + def __init__(self, status, systemstatus, cmd, out, err): + Exception.__init__(self) + self.status = status + self.systemstatus = systemstatus + self.cmd = cmd + self.err = err + self.out = out + + def __str__(self): + return "ExecutionFailed: %d %s\n%s" %(self.status, self.cmd, self.err) +# +# choose correct platform-version +# + +if sys.platform == 'win32': + cmdexec = win32_exec_cmd +elif hasattr(sys, 'pypy') or hasattr(sys, 'pypy_objspaceclass'): + cmdexec = popen3_exec_cmd +else: + cmdexec = posix_exec_cmd + +# export the exception under the name 'py.process.cmdexec.Error' +cmdexec.Error = ExecutionFailed +try: + ExecutionFailed.__module__ = 'py.process.cmdexec' + ExecutionFailed.__name__ = 'Error' +except (AttributeError, TypeError): + pass --- /dev/null +++ b/_py/code/code.py @@ -0,0 +1,764 @@ +import py +import sys + +builtin_repr = repr + +repr = py.builtin._tryimport('repr', 'reprlib') + +class Code(object): + """ wrapper around Python code objects """ + def __init__(self, rawcode): + rawcode = py.code.getrawcode(rawcode) + self.raw = rawcode + try: + self.filename = rawcode.co_filename + self.firstlineno = rawcode.co_firstlineno - 1 + self.name = rawcode.co_name + except AttributeError: + raise TypeError("not a code object: %r" %(rawcode,)) + + def __eq__(self, other): + return self.raw == other.raw + + def __ne__(self, other): + return not self == other + + def new(self, rec=False, **kwargs): + """ return new code object with modified attributes. + if rec-cursive is true then dive into code + objects contained in co_consts. 
+ """ + names = [x for x in dir(self.raw) if x[:3] == 'co_'] + for name in kwargs: + if name not in names: + raise TypeError("unknown code attribute: %r" %(name, )) + if rec and hasattr(self.raw, 'co_consts'): # jython + newconstlist = [] + co = self.raw + cotype = type(co) + for c in co.co_consts: + if isinstance(c, cotype): + c = self.__class__(c).new(rec=True, **kwargs) + newconstlist.append(c) + return self.new(rec=False, co_consts=tuple(newconstlist), **kwargs) + for name in names: + if name not in kwargs: + kwargs[name] = getattr(self.raw, name) + arglist = [ + kwargs['co_argcount'], + kwargs['co_nlocals'], + kwargs.get('co_stacksize', 0), # jython + kwargs.get('co_flags', 0), # jython + kwargs.get('co_code', ''), # jython + kwargs.get('co_consts', ()), # jython + kwargs.get('co_names', []), # + kwargs['co_varnames'], + kwargs['co_filename'], + kwargs['co_name'], + kwargs['co_firstlineno'], + kwargs.get('co_lnotab', ''), #jython + kwargs.get('co_freevars', None), #jython + kwargs.get('co_cellvars', None), # jython + ] + if sys.version_info >= (3,0): + arglist.insert(1, kwargs['co_kwonlyargcount']) + return self.raw.__class__(*arglist) + else: + return py.std.new.code(*arglist) + + def path(self): + """ return a py.path.local object pointing to the source code """ + fn = self.raw.co_filename + try: + return fn.__path__ + except AttributeError: + p = py.path.local(self.raw.co_filename) + if not p.check(file=1): + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? + p = self.raw.co_filename + return p + + path = property(path, None, None, "path of this code object") + + def fullsource(self): + """ return a py.code.Source object for the full source file of the code + """ + from _py.code import source + full, _ = source.findsource(self.raw) + return full + fullsource = property(fullsource, None, None, + "full source containing this code object") + + def source(self): + """ return a py.code.Source object for the code object's source only + """ + # return source only for that part of code + return py.code.Source(self.raw) + + def getargs(self): + """ return a tuple with the argument names for the code object + """ + # handfull shortcut for getting args + raw = self.raw + return raw.co_varnames[:raw.co_argcount] + +class Frame(object): + """Wrapper around a Python frame holding f_locals and f_globals + in which expressions can be evaluated.""" + + def __init__(self, frame): + self.code = py.code.Code(frame.f_code) + self.lineno = frame.f_lineno - 1 + self.f_globals = frame.f_globals + self.f_locals = frame.f_locals + self.raw = frame + + def statement(self): + if self.code.fullsource is None: + return py.code.Source("") + return self.code.fullsource.getstatement(self.lineno) + statement = property(statement, None, None, + "statement this frame is at") + + def eval(self, code, **vars): + """ evaluate 'code' in the frame + + 'vars' are optional additional local variables + + returns the result of the evaluation + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + return eval(code, self.f_globals, f_locals) + + def exec_(self, code, **vars): + """ exec 'code' in the frame + + 'vars' are optiona; additional local variables + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + py.builtin.exec_(code, self.f_globals, f_locals ) + + def repr(self, object): + """ return a 'safe' (non-recursive, one-line) string repr for 'object' + """ + return safe_repr(object) + + def is_true(self, object): + return object + + def getargs(self): 
+ """ return a list of tuples (name, value) for all arguments + """ + retval = [] + for arg in self.code.getargs(): + try: + retval.append((arg, self.f_locals[arg])) + except KeyError: + pass # this can occur when using Psyco + return retval + +class TracebackEntry(object): + """ a single entry in a traceback """ + + exprinfo = None + + def __init__(self, rawentry): + self._rawentry = rawentry + self.frame = py.code.Frame(rawentry.tb_frame) + # Ugh. 2.4 and 2.5 differs here when encountering + # multi-line statements. Not sure about the solution, but + # should be portable + self.lineno = rawentry.tb_lineno - 1 + self.relline = self.lineno - self.frame.code.firstlineno + + def __repr__(self): + return "" %(self.frame.code.path, self.lineno+1) + + def statement(self): + """ return a py.code.Source object for the current statement """ + source = self.frame.code.fullsource + return source.getstatement(self.lineno) + statement = property(statement, None, None, + "statement of this traceback entry.") + + def path(self): + return self.frame.code.path + path = property(path, None, None, "path to the full source code") + + def getlocals(self): + return self.frame.f_locals + locals = property(getlocals, None, None, "locals of underlaying frame") + + def reinterpret(self): + """Reinterpret the failing statement and returns a detailed information + about what operations are performed.""" + if self.exprinfo is None: + from _py.code import assertion + source = str(self.statement).strip() + x = assertion.interpret(source, self.frame, should_fail=True) + if not isinstance(x, str): + raise TypeError("interpret returned non-string %r" % (x,)) + self.exprinfo = x + return self.exprinfo + + def getfirstlinesource(self): + return self.frame.code.firstlineno + + def getsource(self): + """ return failing source code. """ + source = self.frame.code.fullsource + if source is None: + return None + start = self.getfirstlinesource() + end = self.lineno + try: + _, end = source.getstatementrange(end) + except IndexError: + end = self.lineno + 1 + # heuristic to stop displaying source on e.g. + # if something: # assume this causes a NameError + # # _this_ lines and the one + # below we don't want from entry.getsource() + for i in range(self.lineno, end): + if source[i].rstrip().endswith(':'): + end = i + 1 + break + return source[start:end] + source = property(getsource) + + def ishidden(self): + """ return True if the current frame has a var __tracebackhide__ + resolving to True + + mostly for internal use + """ + try: + return self.frame.eval("__tracebackhide__") + except (SystemExit, KeyboardInterrupt): + raise + except: + return False + + def __str__(self): + try: + fn = str(self.path) + except py.error.Error: + fn = '???' + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except: + line = "???" + return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line) + + def name(self): + return self.frame.code.raw.co_name + name = property(name, None, None, "co_name of underlaying code") + +class Traceback(list): + """ Traceback objects encapsulate and offer higher level + access to Traceback entries. + """ + Entry = TracebackEntry + def __init__(self, tb): + """ initialize from given python traceback object. 
""" + if hasattr(tb, 'tb_next'): + def f(cur): + while cur is not None: + yield self.Entry(cur) + cur = cur.tb_next + list.__init__(self, f(tb)) + else: + list.__init__(self, tb) + + def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None): + """ return a Traceback instance wrapping part of this Traceback + + by provding any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined + + this allows cutting the first part of a Traceback instance e.g. + for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback) + """ + for x in self: + code = x.frame.code + codepath = code.path + if ((path is None or codepath == path) and + (excludepath is None or (hasattr(codepath, 'relto') and + not codepath.relto(excludepath))) and + (lineno is None or x.lineno == lineno) and + (firstlineno is None or x.frame.code.firstlineno == firstlineno)): + return Traceback(x._rawentry) + return self + + def __getitem__(self, key): + val = super(Traceback, self).__getitem__(key) + if isinstance(key, type(slice(0))): + val = self.__class__(val) + return val + + def filter(self, fn=lambda x: not x.ishidden()): + """ return a Traceback instance with certain items removed + + fn is a function that gets a single argument, a TracebackItem + instance, and should return True when the item should be added + to the Traceback, False when not + + by default this removes all the TracebackItems which are hidden + (see ishidden() above) + """ + return Traceback(filter(fn, self)) + + def getcrashentry(self): + """ return last non-hidden traceback entry that lead + to the exception of a traceback. + """ + tb = self.filter() + if not tb: + tb = self + return tb[-1] + + def recursionindex(self): + """ return the index of the frame/TracebackItem where recursion + originates if appropriate, None if no recursion occurred + """ + cache = {} + for i, entry in enumerate(self): + key = entry.frame.code.path, entry.lineno + #print "checking for recursion at", key + l = cache.setdefault(key, []) + if l: + f = entry.frame + loc = f.f_locals + for otherloc in l: + if f.is_true(f.eval(co_equal, + __recursioncache_locals_1=loc, + __recursioncache_locals_2=otherloc)): + return i + l.append(entry.frame.f_locals) + return None + +co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2', + '?', 'eval') + +class ExceptionInfo(object): + """ wraps sys.exc_info() objects and offers + help for navigating the traceback. + """ + _striptext = '' + def __init__(self, tup=None, exprinfo=None): + # NB. all attributes are private! Subclasses or other + # ExceptionInfo-like classes may have different attributes. 
+ if tup is None: + tup = sys.exc_info() + if exprinfo is None and isinstance(tup[1], py.code._AssertionError): + exprinfo = getattr(tup[1], 'msg', None) + if exprinfo is None: + exprinfo = str(tup[1]) + if exprinfo and exprinfo.startswith('assert '): + self._striptext = 'AssertionError: ' + self._excinfo = tup + self.type, self.value, tb = self._excinfo + self.typename = self.type.__name__ + self.traceback = py.code.Traceback(tb) + + def __repr__(self): + return "" % (self.typename, len(self.traceback)) + + def exconly(self, tryshort=False): + """ return the exception as a string + + when 'tryshort' resolves to True, and the exception is a + py.code._AssertionError, only the actual exception part of + the exception representation is returned (so 'AssertionError: ' is + removed from the beginning) + """ + lines = py.std.traceback.format_exception_only(self.type, self.value) + text = ''.join(lines) + text = text.rstrip() + if tryshort: + if text.startswith(self._striptext): + text = text[len(self._striptext):] + return text + + def errisinstance(self, exc): + """ return True if the exception is an instance of exc """ + return isinstance(self.value, exc) + + def _getreprcrash(self): + exconly = self.exconly(tryshort=True) + entry = self.traceback.getcrashentry() + path, lineno = entry.path, entry.lineno + reprcrash = ReprFileLocation(path, lineno+1, exconly) + return reprcrash + + def getrepr(self, showlocals=False, style="long", + abspath=False, tbfilter=True, funcargs=False): + """ return str()able representation of this exception info. + showlocals: show locals per traceback entry + style: long|short|no traceback style + tbfilter: hide entries (where __tracebackhide__ is true) + """ + fmt = FormattedExcinfo(showlocals=showlocals, style=style, + abspath=abspath, tbfilter=tbfilter, funcargs=funcargs) + return fmt.repr_excinfo(self) + + def __str__(self): + entry = self.traceback[-1] + loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) + return str(loc) + +class FormattedExcinfo(object): + """ presenting information about failing Functions and Generators. """ + # for traceback entries + flow_marker = ">" + fail_marker = "E" + + def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False): + self.showlocals = showlocals + self.style = style + self.tbfilter = tbfilter + self.funcargs = funcargs + self.abspath = abspath + + def _getindent(self, source): + # figure out indent for given source + try: + s = str(source.getstatement(len(source)-1)) + except KeyboardInterrupt: + raise + except: + try: + s = str(source[-1]) + except KeyboardInterrupt: + raise + except: + return 0 + return 4 + (len(s) - len(s.lstrip())) + + def _getentrysource(self, entry): + source = entry.getsource() + if source is not None: + source = source.deindent() + return source + + def _saferepr(self, obj): + return safe_repr(obj) + + def repr_args(self, entry): + if self.funcargs: + args = [] + for argname, argvalue in entry.frame.getargs(): + args.append((argname, self._saferepr(argvalue))) + return ReprFuncArgs(args) + + def get_source(self, source, line_index=-1, excinfo=None): + """ return formatted and marked up source lines. 
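To see the markers in context: rendering an ExceptionInfo through this formatter produces output roughly like the sketch below, with the flow marker ">" on the failing source line and "E" lines carrying the exception text (shape only, not exact output):

    import py

    try:
        x = 41
        assert x == 42
    except AssertionError:
        excinfo = py.code.ExceptionInfo()
        print(excinfo.getrepr(style="long"))
    # roughly:
    #         x = 41
    # >       assert x == 42
    # E       AssertionError
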
""" + lines = [] + if source is None: + source = py.code.Source("???") + line_index = 0 + if line_index < 0: + line_index += len(source) + for i in range(len(source)): + if i == line_index: + prefix = self.flow_marker + " " + else: + prefix = " " + line = prefix + source[i] + lines.append(line) + if excinfo is not None: + indent = self._getindent(source) + lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) + return lines + + def get_exconly(self, excinfo, indent=4, markall=False): + lines = [] + indent = " " * indent + # get the real exception information out + exlines = excinfo.exconly(tryshort=True).split('\n') + failindent = self.fail_marker + indent[1:] + for line in exlines: + lines.append(failindent + line) + if not markall: + failindent = indent + return lines + + def repr_locals(self, locals): + if self.showlocals: + lines = [] + keys = list(locals) + keys.sort() + for name in keys: + value = locals[name] + if name == '__builtins__': + lines.append("__builtins__ = ") + else: + # This formatting could all be handled by the + # _repr() function, which is only repr.Repr in + # disguise, so is very configurable. + str_repr = self._saferepr(value) + #if len(str_repr) < 70 or not isinstance(value, + # (list, tuple, dict)): + lines.append("%-10s = %s" %(name, str_repr)) + #else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # py.std.pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + + def repr_traceback_entry(self, entry, excinfo=None): + # excinfo is not None if this is the last tb entry + source = self._getentrysource(entry) + if source is None: + source = py.code.Source("???") + line_index = 0 + else: + line_index = entry.lineno - entry.getfirstlinesource() + + lines = [] + if self.style == "long": + reprargs = self.repr_args(entry) + lines.extend(self.get_source(source, line_index, excinfo)) + message = excinfo and excinfo.typename or "" + path = self._makepath(entry.path) + filelocrepr = ReprFileLocation(path, entry.lineno+1, message) + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, filelocrepr) + else: + if self.style == "short": + line = source[line_index].lstrip() + lines.append(' File "%s", line %d, in %s' % ( + entry.path.basename, entry.lineno+1, entry.name)) + lines.append(" " + line) + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None) + + def _makepath(self, path): + if not self.abspath: + np = py.path.local().bestrelpath(path) + if len(np) < len(str(path)): + path = np + return path + + def repr_traceback(self, excinfo): + traceback = excinfo.traceback + if self.tbfilter: + traceback = traceback.filter() + recursionindex = None + if excinfo.errisinstance(RuntimeError): + recursionindex = traceback.recursionindex() + last = traceback[-1] + entries = [] + extraline = None + for index, entry in enumerate(traceback): + einfo = (last == entry) and excinfo or None + reprentry = self.repr_traceback_entry(entry, einfo) + entries.append(reprentry) + if index == recursionindex: + extraline = "!!! 
Recursion detected (same locals & position)" + break + return ReprTraceback(entries, extraline, style=self.style) + + def repr_excinfo(self, excinfo): + reprtraceback = self.repr_traceback(excinfo) + reprcrash = excinfo._getreprcrash() + return ReprExceptionInfo(reprtraceback, reprcrash) + +class TerminalRepr: + def __str__(self): + tw = py.io.TerminalWriter(stringio=True) + self.toterminal(tw) + return tw.stringio.getvalue().strip() + + def __repr__(self): + return "<%s instance at %0x>" %(self.__class__, id(self)) + +class ReprExceptionInfo(TerminalRepr): + def __init__(self, reprtraceback, reprcrash): + self.reprtraceback = reprtraceback + self.reprcrash = reprcrash + self.sections = [] + + def addsection(self, name, content, sep="-"): + self.sections.append((name, content, sep)) + + def toterminal(self, tw): + self.reprtraceback.toterminal(tw) + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + +class ReprTraceback(TerminalRepr): + entrysep = "_ " + + def __init__(self, reprentries, extraline, style): + self.reprentries = reprentries + self.extraline = extraline + self.style = style + + def toterminal(self, tw): + sepok = False + for entry in self.reprentries: + if self.style == "long": + if sepok: + tw.sep(self.entrysep) + tw.line("") + sepok = True + entry.toterminal(tw) + if self.extraline: + tw.line(self.extraline) + +class ReprEntry(TerminalRepr): + localssep = "_ " + + def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr): + self.lines = lines + self.reprfuncargs = reprfuncargs + self.reprlocals = reprlocals + self.reprfileloc = filelocrepr + + def toterminal(self, tw): + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + if self.reprlocals: + #tw.sep(self.localssep, "Locals") + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self): + return "%s\n%s\n%s" % ("\n".join(self.lines), + self.reprlocals, + self.reprfileloc) + +class ReprFileLocation(TerminalRepr): + def __init__(self, path, lineno, message): + self.path = str(path) + self.lineno = lineno + self.message = message + + def toterminal(self, tw): + # filename and lineno output for each entry, + # using an output format that most editors unterstand + msg = self.message + i = msg.find("\n") + if i != -1: + msg = msg[:i] + tw.line("%s:%s: %s" %(self.path, self.lineno, msg)) + +class ReprLocals(TerminalRepr): + def __init__(self, lines): + self.lines = lines + + def toterminal(self, tw): + for line in self.lines: + tw.line(line) + +class ReprFuncArgs(TerminalRepr): + def __init__(self, args): + self.args = args + + def toterminal(self, tw): + if self.args: + linesofar = "" + for name, value in self.args: + ns = "%s = %s" %(name, value) + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + + +class SafeRepr(repr.Repr): + """ subclass of repr.Repr that limits the resulting size of repr() + and includes information on exceptions raised during the call. 
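What that guard buys in practice, sketched with a made-up class whose __repr__ raises (safe_repr is the module-level helper defined just below):

    class Broken(object):
        def __repr__(self):
            raise ValueError("boom")

    safe_repr(Broken())
    # -> '<[ValueError("boom") raised in repr()] Broken object at 0x...>'
    safe_repr("x" * 1000)
    # -> truncated to roughly 240 characters with "..." in the middle
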
+ """ + def __init__(self, *args, **kwargs): + repr.Repr.__init__(self, *args, **kwargs) + self.maxstring = 240 # 3 * 80 chars + self.maxother = 160 # 2 * 80 chars + + def repr(self, x): + return self._callhelper(repr.Repr.repr, self, x) + + def repr_instance(self, x, level): + return self._callhelper(builtin_repr, x) + + def _callhelper(self, call, x, *args): + try: + # Try the vanilla repr and make sure that the result is a string + s = call(x, *args) + except (KeyboardInterrupt, MemoryError, SystemExit): + raise + except: + cls, e, tb = sys.exc_info() + try: + exc_name = cls.__name__ + except: + exc_name = 'unknown' + try: + exc_info = str(e) + except: + exc_info = 'unknown' + return '<[%s("%s") raised in repr()] %s object at 0x%x>' % ( + exc_name, exc_info, x.__class__.__name__, id(x)) + else: + if len(s) > self.maxstring: + i = max(0, (self.maxstring-3)//2) + j = max(0, self.maxstring-3-i) + s = s[:i] + '...' + s[len(s)-j:] + return s + +safe_repr = SafeRepr().repr + +oldbuiltins = {} + +def patch_builtins(assertion=True, compile=True): + """ put compile and AssertionError builtins to Python's builtins. """ + if assertion: + from _py.code import assertion + l = oldbuiltins.setdefault('AssertionError', []) + l.append(py.builtin.builtins.AssertionError) + py.builtin.builtins.AssertionError = assertion.AssertionError + if compile: + l = oldbuiltins.setdefault('compile', []) + l.append(py.builtin.builtins.compile) + py.builtin.builtins.compile = py.code.compile + +def unpatch_builtins(assertion=True, compile=True): + """ remove compile and AssertionError builtins from Python builtins. """ + if assertion: + py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop() + if compile: + py.builtin.builtins.compile = oldbuiltins['compile'].pop() + +def getrawcode(obj): + """ return code object for given function. 
""" + obj = getattr(obj, 'im_func', obj) + obj = getattr(obj, 'func_code', obj) + obj = getattr(obj, 'f_code', obj) + obj = getattr(obj, '__code__', obj) + return obj + --- /dev/null +++ b/_py/path/__init__.py @@ -0,0 +1,1 @@ +""" unified file system api """ --- /dev/null +++ b/_py/builtin/builtin31.py @@ -0,0 +1,117 @@ +import py +import sys + +if sys.version_info >= (3, 0): + exec ("print_ = print ; exec_=exec") + import builtins + + # some backward compatibility helpers + _basestring = str + def _totext(obj, encoding): + if isinstance(obj, bytes): + obj = obj.decode(encoding) + elif not isinstance(obj, str): + obj = str(obj) + return obj + + def _isbytes(x): + return isinstance(x, bytes) + def _istext(x): + return isinstance(x, str) + + def _getimself(function): + return getattr(function, '__self__', None) + + def _getfuncdict(function): + return getattr(function, "__dict__", None) + + def execfile(fn, globs=None, locs=None): + if globs is None: + back = sys._getframe(1) + globs = back.f_globals + locs = back.f_locals + del back + elif locs is None: + locs = globs + fp = open(fn, "rb") + try: + source = fp.read() + finally: + fp.close() + co = compile(source, fn, "exec", dont_inherit=True) + exec_(co, globs, locs) + + def callable(obj): + return hasattr(obj, "__call__") + +else: + import __builtin__ as builtins + _totext = unicode + _basestring = basestring + execfile = execfile + callable = callable + def _isbytes(x): + return isinstance(x, str) + def _istext(x): + return isinstance(x, unicode) + + def _getimself(function): + return getattr(function, 'im_self', None) + + def _getfuncdict(function): + return getattr(function, "__dict__", None) + + def print_(*args, **kwargs): + """ minimal backport of py3k print statement. """ + sep = ' ' + if 'sep' in kwargs: + sep = kwargs.pop('sep') + end = '\n' + if 'end' in kwargs: + end = kwargs.pop('end') + file = 'file' in kwargs and kwargs.pop('file') or sys.stdout + if kwargs: + args = ", ".join([str(x) for x in kwargs]) + raise TypeError("invalid keyword arguments: %s" % args) + at_start = True + for x in args: + if not at_start: + file.write(sep) + file.write(str(x)) + at_start = False + file.write(end) + + def exec_(obj, globals=None, locals=None): + """ minimal backport of py3k exec statement. """ + if globals is None: + frame = sys._getframe(1) + globals = frame.f_globals + if locals is None: + locals = frame.f_locals + elif locals is None: + locals = globals + exec2(obj, globals, locals) + +if sys.version_info >= (3,0): + exec (""" +def _reraise(cls, val, tb): + assert hasattr(val, '__traceback__') + raise val +""") +else: + exec (""" +def _reraise(cls, val, tb): + raise cls, val, tb +def exec2(obj, globals, locals): + exec obj in globals, locals +""") + +def _tryimport(*names): + """ return the first successfully imported module. 
""" + assert names + for name in names: + try: + return __import__(name, None, None, '__doc__') + except ImportError: + excinfo = sys.exc_info() + py.builtin._reraise(*excinfo) --- /dev/null +++ b/_py/cmdline/pytest.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python +import py + +def main(): + py.test.cmdline.main() --- /dev/null +++ b/_py/log/__init__.py @@ -0,0 +1,2 @@ +""" logging API ('producers' and 'consumers' connected via keywords) """ + --- /dev/null +++ b/_py/process/__init__.py @@ -0,0 +1,1 @@ +""" high-level sub-process handling """ --- /dev/null +++ b/_py/code/_assertionold.py @@ -0,0 +1,558 @@ +import py +import sys, inspect +from compiler import parse, ast, pycodegen +from _py.code.assertion import BuiltinAssertionError, _format_explanation + +passthroughex = (KeyboardInterrupt, SystemExit, MemoryError) + +class Failure: + def __init__(self, node): + self.exc, self.value, self.tb = sys.exc_info() + self.node = node + +class View(object): + """View base class. + + If C is a subclass of View, then C(x) creates a proxy object around + the object x. The actual class of the proxy is not C in general, + but a *subclass* of C determined by the rules below. To avoid confusion + we call view class the class of the proxy (a subclass of C, so of View) + and object class the class of x. + + Attributes and methods not found in the proxy are automatically read on x. + Other operations like setting attributes are performed on the proxy, as + determined by its view class. The object x is available from the proxy + as its __obj__ attribute. + + The view class selection is determined by the __view__ tuples and the + optional __viewkey__ method. By default, the selected view class is the + most specific subclass of C whose __view__ mentions the class of x. + If no such subclass is found, the search proceeds with the parent + object classes. For example, C(True) will first look for a subclass + of C with __view__ = (..., bool, ...) and only if it doesn't find any + look for one with __view__ = (..., int, ...), and then ..., object,... + If everything fails the class C itself is considered to be the default. + + Alternatively, the view class selection can be driven by another aspect + of the object x, instead of the class of x, by overriding __viewkey__. + See last example at the end of this module. 
+ """ + + _viewcache = {} + __view__ = () + + def __new__(rootclass, obj, *args, **kwds): + self = object.__new__(rootclass) + self.__obj__ = obj + self.__rootclass__ = rootclass + key = self.__viewkey__() + try: + self.__class__ = self._viewcache[key] + except KeyError: + self.__class__ = self._selectsubclass(key) + return self + + def __getattr__(self, attr): + # attributes not found in the normal hierarchy rooted on View + # are looked up in the object's real class + return getattr(self.__obj__, attr) + + def __viewkey__(self): + return self.__obj__.__class__ + + def __matchkey__(self, key, subclasses): + if inspect.isclass(key): + keys = inspect.getmro(key) + else: + keys = [key] + for key in keys: + result = [C for C in subclasses if key in C.__view__] + if result: + return result + return [] + + def _selectsubclass(self, key): + subclasses = list(enumsubclasses(self.__rootclass__)) + for C in subclasses: + if not isinstance(C.__view__, tuple): + C.__view__ = (C.__view__,) + choices = self.__matchkey__(key, subclasses) + if not choices: + return self.__rootclass__ + elif len(choices) == 1: + return choices[0] + else: + # combine the multiple choices + return type('?', tuple(choices), {}) + + def __repr__(self): + return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__) + + +def enumsubclasses(cls): + for subcls in cls.__subclasses__(): + for subsubclass in enumsubclasses(subcls): + yield subsubclass + yield cls + + +class Interpretable(View): + """A parse tree node with a few extra methods.""" + explanation = None + + def is_builtin(self, frame): + return False + + def eval(self, frame): + # fall-back for unknown expression nodes + try: + expr = ast.Expression(self.__obj__) + expr.filename = '' + self.__obj__.filename = '' + co = pycodegen.ExpressionCodeGenerator(expr).getCode() + result = frame.eval(co) + except passthroughex: + raise + except: + raise Failure(self) + self.result = result + self.explanation = self.explanation or frame.repr(self.result) + + def run(self, frame): + # fall-back for unknown statement nodes + try: + expr = ast.Module(None, ast.Stmt([self.__obj__])) + expr.filename = '' + co = pycodegen.ModuleCodeGenerator(expr).getCode() + frame.exec_(co) + except passthroughex: + raise + except: + raise Failure(self) + + def nice_explanation(self): + return _format_explanation(self.explanation) + + +class Name(Interpretable): + __view__ = ast.Name + + def is_local(self, frame): + co = compile('%r in locals() is not globals()' % self.name, '?', 'eval') + try: + return frame.is_true(frame.eval(co)) + except passthroughex: + raise + except: + return False + + def is_global(self, frame): + co = compile('%r in globals()' % self.name, '?', 'eval') + try: + return frame.is_true(frame.eval(co)) + except passthroughex: + raise + except: + return False + + def is_builtin(self, frame): + co = compile('%r not in locals() and %r not in globals()' % ( + self.name, self.name), '?', 'eval') + try: + return frame.is_true(frame.eval(co)) + except passthroughex: + raise + except: + return False + + def eval(self, frame): + super(Name, self).eval(frame) + if not self.is_local(frame): + self.explanation = self.name + +class Compare(Interpretable): + __view__ = ast.Compare + + def eval(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + for operation, expr2 in self.ops: + if hasattr(self, 'result'): + # shortcutting in chained expressions + if not frame.is_true(self.result): + break + expr2 = Interpretable(expr2) + expr2.eval(frame) + self.explanation = "%s %s %s" % ( + 
expr.explanation, operation, expr2.explanation) + co = compile("__exprinfo_left %s __exprinfo_right" % operation, + '?', 'eval') + try: + self.result = frame.eval(co, __exprinfo_left=expr.result, + __exprinfo_right=expr2.result) + except passthroughex: + raise + except: + raise Failure(self) + expr = expr2 + +class And(Interpretable): + __view__ = ast.And + + def eval(self, frame): + explanations = [] + for expr in self.nodes: + expr = Interpretable(expr) + expr.eval(frame) + explanations.append(expr.explanation) + self.result = expr.result + if not frame.is_true(expr.result): + break + self.explanation = '(' + ' and '.join(explanations) + ')' + +class Or(Interpretable): + __view__ = ast.Or + + def eval(self, frame): + explanations = [] + for expr in self.nodes: + expr = Interpretable(expr) + expr.eval(frame) + explanations.append(expr.explanation) + self.result = expr.result + if frame.is_true(expr.result): + break + self.explanation = '(' + ' or '.join(explanations) + ')' + + +# == Unary operations == +keepalive = [] +for astclass, astpattern in { + ast.Not : 'not __exprinfo_expr', + ast.Invert : '(~__exprinfo_expr)', + }.items(): + + class UnaryArith(Interpretable): + __view__ = astclass + + def eval(self, frame, astpattern=astpattern, + co=compile(astpattern, '?', 'eval')): + expr = Interpretable(self.expr) + expr.eval(frame) + self.explanation = astpattern.replace('__exprinfo_expr', + expr.explanation) + try: + self.result = frame.eval(co, __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + + keepalive.append(UnaryArith) + +# == Binary operations == +for astclass, astpattern in { + ast.Add : '(__exprinfo_left + __exprinfo_right)', + ast.Sub : '(__exprinfo_left - __exprinfo_right)', + ast.Mul : '(__exprinfo_left * __exprinfo_right)', + ast.Div : '(__exprinfo_left / __exprinfo_right)', + ast.Mod : '(__exprinfo_left % __exprinfo_right)', + ast.Power : '(__exprinfo_left ** __exprinfo_right)', + }.items(): + + class BinaryArith(Interpretable): + __view__ = astclass + + def eval(self, frame, astpattern=astpattern, + co=compile(astpattern, '?', 'eval')): + left = Interpretable(self.left) + left.eval(frame) + right = Interpretable(self.right) + right.eval(frame) + self.explanation = (astpattern + .replace('__exprinfo_left', left .explanation) + .replace('__exprinfo_right', right.explanation)) + try: + self.result = frame.eval(co, __exprinfo_left=left.result, + __exprinfo_right=right.result) + except passthroughex: + raise + except: + raise Failure(self) + + keepalive.append(BinaryArith) + + +class CallFunc(Interpretable): + __view__ = ast.CallFunc + + def is_bool(self, frame): + co = compile('isinstance(__exprinfo_value, bool)', '?', 'eval') + try: + return frame.is_true(frame.eval(co, __exprinfo_value=self.result)) + except passthroughex: + raise + except: + return False + + def eval(self, frame): + node = Interpretable(self.node) + node.eval(frame) + explanations = [] + vars = {'__exprinfo_fn': node.result} + source = '__exprinfo_fn(' + for a in self.args: + if isinstance(a, ast.Keyword): + keyword = a.name + a = a.expr + else: + keyword = None + a = Interpretable(a) + a.eval(frame) + argname = '__exprinfo_%d' % len(vars) + vars[argname] = a.result + if keyword is None: + source += argname + ',' + explanations.append(a.explanation) + else: + source += '%s=%s,' % (keyword, argname) + explanations.append('%s=%s' % (keyword, a.explanation)) + if self.star_args: + star_args = Interpretable(self.star_args) + star_args.eval(frame) + argname = 
'__exprinfo_star' + vars[argname] = star_args.result + source += '*' + argname + ',' + explanations.append('*' + star_args.explanation) + if self.dstar_args: + dstar_args = Interpretable(self.dstar_args) + dstar_args.eval(frame) + argname = '__exprinfo_kwds' + vars[argname] = dstar_args.result + source += '**' + argname + ',' + explanations.append('**' + dstar_args.explanation) + self.explanation = "%s(%s)" % ( + node.explanation, ', '.join(explanations)) + if source.endswith(','): + source = source[:-1] + source += ')' + co = compile(source, '?', 'eval') + try: + self.result = frame.eval(co, **vars) + except passthroughex: + raise + except: + raise Failure(self) + if not node.is_builtin(frame) or not self.is_bool(frame): + r = frame.repr(self.result) + self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) + +class Getattr(Interpretable): + __view__ = ast.Getattr + + def eval(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + co = compile('__exprinfo_expr.%s' % self.attrname, '?', 'eval') + try: + self.result = frame.eval(co, __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + self.explanation = '%s.%s' % (expr.explanation, self.attrname) + # if the attribute comes from the instance, its value is interesting + co = compile('hasattr(__exprinfo_expr, "__dict__") and ' + '%r in __exprinfo_expr.__dict__' % self.attrname, + '?', 'eval') + try: + from_instance = frame.is_true( + frame.eval(co, __exprinfo_expr=expr.result)) + except passthroughex: + raise + except: + from_instance = True + if from_instance: + r = frame.repr(self.result) + self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) + +# == Re-interpretation of full statements == + +class Assert(Interpretable): + __view__ = ast.Assert + + def run(self, frame): + test = Interpretable(self.test) + test.eval(frame) + # simplify 'assert False where False = ...' + if (test.explanation.startswith('False\n{False = ') and + test.explanation.endswith('\n}')): + test.explanation = test.explanation[15:-2] + # print the result as 'assert ' + self.result = test.result + self.explanation = 'assert ' + test.explanation + if not frame.is_true(test.result): + try: + raise BuiltinAssertionError + except passthroughex: + raise + except: + raise Failure(self) + +class Assign(Interpretable): + __view__ = ast.Assign + + def run(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + self.result = expr.result + self.explanation = '... 
= ' + expr.explanation + # fall-back-run the rest of the assignment + ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr')) + mod = ast.Module(None, ast.Stmt([ass])) + mod.filename = '' + co = pycodegen.ModuleCodeGenerator(mod).getCode() + try: + frame.exec_(co, __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + +class Discard(Interpretable): + __view__ = ast.Discard + + def run(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + self.result = expr.result + self.explanation = expr.explanation + +class Stmt(Interpretable): + __view__ = ast.Stmt + + def run(self, frame): + for stmt in self.nodes: + stmt = Interpretable(stmt) + stmt.run(frame) + + +def report_failure(e): + explanation = e.node.nice_explanation() + if explanation: + explanation = ", in: " + explanation + else: + explanation = "" + sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation)) + +def check(s, frame=None): + if frame is None: + import sys + frame = sys._getframe(1) + frame = py.code.Frame(frame) + expr = parse(s, 'eval') + assert isinstance(expr, ast.Expression) + node = Interpretable(expr.node) + try: + node.eval(frame) + except passthroughex: + raise + except Failure: + e = sys.exc_info()[1] + report_failure(e) + else: + if not frame.is_true(node.result): + sys.stderr.write("assertion failed: %s\n" % node.nice_explanation()) + + +########################################################### +# API / Entry points +# ######################################################### + +def interpret(source, frame, should_fail=False): + module = Interpretable(parse(source, 'exec').node) + #print "got module", module + if isinstance(frame, py.std.types.FrameType): + frame = py.code.Frame(frame) + try: + module.run(frame) + except Failure: + e = sys.exc_info()[1] + return getfailure(e) + except passthroughex: + raise + except: + import traceback + traceback.print_exc() + if should_fail: + return ("(assertion failed, but when it was re-run for " + "printing intermediate values, it did not fail. 
Suggestions: " + "compute assert expression before the assert or use --nomagic)") + else: + return None + +def getmsg(excinfo): + if isinstance(excinfo, tuple): + excinfo = py.code.ExceptionInfo(excinfo) + #frame, line = gettbline(tb) + #frame = py.code.Frame(frame) + #return interpret(line, frame) + + tb = excinfo.traceback[-1] + source = str(tb.statement).strip() + x = interpret(source, tb.frame, should_fail=True) + if not isinstance(x, str): + raise TypeError("interpret returned non-string %r" % (x,)) + return x + +def getfailure(e): + explanation = e.node.nice_explanation() + if str(e.value): + lines = explanation.split('\n') + lines[0] += " << %s" % (e.value,) + explanation = '\n'.join(lines) + text = "%s: %s" % (e.exc.__name__, explanation) + if text.startswith('AssertionError: assert '): + text = text[16:] + return text + +def run(s, frame=None): + if frame is None: + import sys + frame = sys._getframe(1) + frame = py.code.Frame(frame) + module = Interpretable(parse(s, 'exec').node) + try: + module.run(frame) + except Failure: + e = sys.exc_info()[1] + report_failure(e) + + +if __name__ == '__main__': + # example: + def f(): + return 5 + def g(): + return 3 + def h(x): + return 'never' + check("f() * g() == 5") + check("not f()") + check("not (f() and g() or 0)") + check("f() == g()") + i = 4 + check("i == f()") + check("len(f()) == 0") + check("isinstance(2+3+4, float)") + + run("x = i") + check("x == 5") + + run("assert not f(), 'oops'") + run("a, b, c = 1, 2") + run("a, b, c = f()") + + check("max([f(),g()]) == 4") + check("'hello'[g()] == 'h'") + run("'guk%d' % h(f())") --- /dev/null +++ b/_py/cmdline/pylookup.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python + +"""\ +py.lookup [search_directory] SEARCH_STRING [options] + +Looks recursively at Python files for a SEARCH_STRING, starting from the +present working directory. 
Prints the line, with the filename and line-number +prepended.""" + +import sys, os +import py +from _py.io.terminalwriter import ansi_print, terminal_width +import re + +def rec(p): + return p.check(dotfile=0) + +parser = py.std.optparse.OptionParser(usage=__doc__) +parser.add_option("-i", "--ignore-case", action="store_true", dest="ignorecase", + help="ignore case distinctions") +parser.add_option("-C", "--context", action="store", type="int", dest="context", + default=0, help="How many lines of output to show") + +def find_indexes(search_line, string): + indexes = [] + before = 0 + while 1: + i = search_line.find(string, before) + if i == -1: + break + indexes.append(i) + before = i + len(string) + return indexes + +def main(): + (options, args) = parser.parse_args() + if len(args) == 2: + search_dir, string = args + search_dir = py.path.local(search_dir) + else: + search_dir = py.path.local() + string = args[0] + if options.ignorecase: + string = string.lower() + for x in search_dir.visit('*.py', rec): + # match filename directly + s = x.relto(search_dir) + if options.ignorecase: + s = s.lower() + if s.find(string) != -1: + sys.stdout.write("%s: filename matches %r" %(x, string) + "\n") + + try: + s = x.read() + except py.error.ENOENT: + pass # whatever, probably broken link (ie emacs lock) + searchs = s + if options.ignorecase: + searchs = s.lower() + if s.find(string) != -1: + lines = s.splitlines() + if options.ignorecase: + searchlines = s.lower().splitlines() + else: + searchlines = lines + for i, (line, searchline) in enumerate(zip(lines, searchlines)): + indexes = find_indexes(searchline, string) + if not indexes: + continue + if not options.context: + sys.stdout.write("%s:%d: " %(x.relto(search_dir), i+1)) + last_index = 0 + for index in indexes: + sys.stdout.write(line[last_index: index]) + ansi_print(line[index: index+len(string)], + file=sys.stdout, esc=31, newline=False) + last_index = index + len(string) + sys.stdout.write(line[last_index:] + "\n") + else: + context = (options.context)/2 + for count in range(max(0, i-context), min(len(lines) - 1, i+context+1)): + print("%s:%d: %s" %(x.relto(search_dir), count+1, lines[count].rstrip())) + print("-" * terminal_width) --- /dev/null +++ b/_py/_com.py @@ -0,0 +1,125 @@ +""" +py lib plugins and plugin call management +""" + +import py +import inspect + +__all__ = ['Registry', 'MultiCall', 'comregistry', 'HookRelay'] + +class MultiCall: + """ execute a call into multiple python functions/methods. 
""" + + def __init__(self, methods, kwargs, firstresult=False): + self.methods = methods[:] + self.kwargs = kwargs.copy() + self.kwargs['__multicall__'] = self + self.results = [] + self.firstresult = firstresult + + def __repr__(self): + status = "%d results, %d meths" % (len(self.results), len(self.methods)) + return "" %(status, self.kwargs) + + def execute(self): + while self.methods: + method = self.methods.pop() + kwargs = self.getkwargs(method) + res = method(**kwargs) + if res is not None: + self.results.append(res) + if self.firstresult: + return res + if not self.firstresult: + return self.results + + def getkwargs(self, method): + kwargs = {} + for argname in varnames(method): + try: + kwargs[argname] = self.kwargs[argname] + except KeyError: + pass # might be optional param + return kwargs + +def varnames(func): + ismethod = inspect.ismethod(func) + rawcode = py.code.getrawcode(func) + try: + return rawcode.co_varnames[ismethod:] + except AttributeError: + return () + +class Registry: + """ + Manage Plugins: register/unregister call calls to plugins. + """ + def __init__(self, plugins=None): + if plugins is None: + plugins = [] + self._plugins = plugins + + def register(self, plugin): + assert not isinstance(plugin, str) + assert not plugin in self._plugins + self._plugins.append(plugin) + + def unregister(self, plugin): + self._plugins.remove(plugin) + + def isregistered(self, plugin): + return plugin in self._plugins + + def __iter__(self): + return iter(self._plugins) + + def listattr(self, attrname, plugins=None, extra=(), reverse=False): + l = [] + if plugins is None: + plugins = self._plugins + candidates = list(plugins) + list(extra) + for plugin in candidates: + try: + l.append(getattr(plugin, attrname)) + except AttributeError: + continue + if reverse: + l.reverse() + return l + +class HookRelay: + def __init__(self, hookspecs, registry): + self._hookspecs = hookspecs + self._registry = registry + for name, method in vars(hookspecs).items(): + if name[:1] != "_": + setattr(self, name, self._makecall(name)) + + def _makecall(self, name, extralookup=None): + hookspecmethod = getattr(self._hookspecs, name) + firstresult = getattr(hookspecmethod, 'firstresult', False) + return HookCaller(self, name, firstresult=firstresult, + extralookup=extralookup) + + def _getmethods(self, name, extralookup=()): + return self._registry.listattr(name, extra=extralookup) + + def _performcall(self, name, multicall): + return multicall.execute() + +class HookCaller: + def __init__(self, hookrelay, name, firstresult, extralookup=None): + self.hookrelay = hookrelay + self.name = name + self.firstresult = firstresult + self.extralookup = extralookup and [extralookup] or () + + def __repr__(self): + return "" %(self.name,) + + def __call__(self, **kwargs): + methods = self.hookrelay._getmethods(self.name, self.extralookup) + mc = MultiCall(methods, kwargs, firstresult=self.firstresult) + return self.hookrelay._performcall(self.name, mc) + +comregistry = Registry([]) From commits-noreply at bitbucket.org Mon Oct 5 01:45:15 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 4 Oct 2009 23:45:15 +0000 (UTC) Subject: [py-svn] py-trunk commit 24cf00dd82ff: * don't add distributed command line options when 'execnet' is not Message-ID: <20091004234515.DDF909A7A3@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1254515362 -7200 # Node ID 
24cf00dd82ff0ddb529b1545fbccd8d2c3e2ae95 # Parent b571b7e9a9b2f2f44d63a58360476de7868fd312 * don't add distributed command line options when 'execnet' is not installed, report a nice message. * fix tests and code to work with non-existing execnet * point execnet doc to the new package --- a/py/test/parseopt.py +++ b/py/test/parseopt.py @@ -24,12 +24,16 @@ class Parser: self._groups = [self._anonymous] self._processopt = processopt self._usage = usage + self.epilog = "" def processoption(self, option): if self._processopt: if option.dest: self._processopt(option) + def addnote(self, note): + self._notes.append(note) + def addgroup(self, name, description=""): for group in self._groups: if group.name == name: @@ -51,6 +55,7 @@ class Parser: def parse(self, args): optparser = optparse.OptionParser(usage=self._usage) # make sure anaonymous group is at the end + optparser.epilog = self.epilog groups = self._groups[1:] + [self._groups[0]] for group in groups: if group.options: --- /dev/null +++ b/testing/pytest/dist/conftest.py @@ -0,0 +1,4 @@ +try: + import execnet +except ImportError: + collect_ignore = ['.'] --- a/testing/pytest/plugin/test_pytest_pdb.py +++ b/testing/pytest/plugin/test_pytest_pdb.py @@ -44,7 +44,8 @@ class TestPDB: if child.isalive(): child.wait() - def test_incompatibility_messages(self, testdir): + def test_dist_incompatibility_messages(self, testdir): + py.test.importorskip("execnet") Error = py.test.config.Error py.test.raises(Error, "testdir.parseconfigure('--pdb', '--looponfail')") result = testdir.runpytest("--pdb", "-n", "3") --- a/testing/pytest/test_config.py +++ b/testing/pytest/test_config.py @@ -218,6 +218,7 @@ class TestOptionEffects: config = py.test.config._reparse([tmpdir]) config.initsession() assert not config.option.boxed + py.test.importorskip("execnet") config = py.test.config._reparse(['-d', tmpdir]) config.initsession() assert not config.option.boxed --- a/py/test/dist/mypickle.py +++ b/py/test/dist/mypickle.py @@ -13,7 +13,6 @@ """ import py -from execnet.gateway_base import Channel import sys, os, struct #debug = open("log-mypickle-%d" % os.getpid(), 'w') @@ -139,6 +138,7 @@ class PickleChannel(object): self.RemoteError = channel.RemoteError def send(self, obj): + from execnet.gateway_base import Channel if not isinstance(obj, Channel): pickled_obj = self._ipickle.dumps(obj) self._channel.send(pickled_obj) --- a/doc/index.txt +++ b/doc/index.txt @@ -7,8 +7,6 @@ documentation on the most interesting on `py.test`_ write and deploy unit- and functional tests to multiple machines. -`py.execnet`_ elastic distributed programming. - `py.code`_: generate code and use advanced introspection/traceback support. `py.path`_: use path objects to transparently access local and svn filesystems. @@ -33,7 +31,6 @@ For the latest Release, see `PyPI projec .. _`download and installation`: download.html .. _`py-dev at codespeak net`: http://codespeak.net/mailman/listinfo/py-dev -.. _`py.execnet`: execnet.html .. _`py.log`: log.html .. _`py.io`: io.html .. _`py.path`: path.html --- a/doc/path.txt +++ b/doc/path.txt @@ -246,7 +246,7 @@ on the `svn` command line, not on the bi It makes sense now to directly use the bindings. Moreover, it would be good, also considering -`py.execnet`_ distribution of programs, to +`execnet`_ distribution of programs, to be able to manipulate Windows Paths on Linux and vice versa. 
So we'd like to consider refactoring the path implementations @@ -269,5 +269,5 @@ the quite full interface without requiri to know about all details of the full path implementation. -.. _`py.execnet`: execnet.html +.. _`execnet`: execnet.html --- a/doc/test/dist.txt +++ b/doc/test/dist.txt @@ -11,10 +11,8 @@ synchronizes your program source code to are reported back and displayed to your local test session. You may specify different Python versions and interpreters. -Synchronisation and running of tests only requires -a bare Python installation on the remote side. No -special software is installed - this is realized -by use of the **zero installation** `py.execnet`_ mechanisms. +**Requirements**: you need to install the `execnet`_ package +to perform distributed test runs. Speed up test runs by sending tests to multiple CPUs ---------------------------------------------------------- @@ -90,13 +88,13 @@ The basic command to run tests on multip If you specify a windows host, an OSX host and a Linux environment this command will send each tests to all platforms - and report back failures from all platforms -at once. The provided specifications strings -use the `xspec syntax`_. +at once. The specifications strings use the `xspec syntax`_. -.. _`xspec syntax`: ../execnet.html#xspec +.. _`xspec syntax`: http://codespeak.net/execnet/trunk/basics.html#xspec .. _`socketserver.py`: http://codespeak.net/svn/py/dist/py/execnet/script/socketserver.py -.. _`py.execnet`: ../execnet.html + +.. _`execnet`: http://codespeak.net/execnet Specifying test exec environments in a conftest.py ------------------------------------------------------------- --- a/testing/pytest/plugin/test_pytest_terminal.py +++ b/testing/pytest/plugin/test_pytest_terminal.py @@ -3,6 +3,10 @@ terminal reporting of the full testing p """ import py import sys +try: + import execnet +except ImportError: + execnet = None # =============================================================================== # plugin tests @@ -42,7 +46,7 @@ def pytest_generate_tests(metafunc): funcargs={'option': Option(verbose=True)} ) nodist = getattr(metafunc.function, 'nodist', False) - if not nodist: + if execnet and not nodist: metafunc.addcall( id="verbose-dist", funcargs={'option': Option(dist='each', verbose=True)} @@ -602,9 +606,10 @@ class TestTerminalFunctional: "*test_verbose_reporting.py:10: test_gen*FAIL*", ]) assert result.ret == 1 - result = testdir.runpytest(p1, '-v', '-n 1') - result.stdout.fnmatch_lines([ - "*FAIL*test_verbose_reporting.py:2: test_fail*", - ]) - assert result.ret == 1 + if execnet: + result = testdir.runpytest(p1, '-v', '-n 1') + result.stdout.fnmatch_lines([ + "*FAIL*test_verbose_reporting.py:2: test_fail*", + ]) + assert result.ret == 1 --- a/doc/xml.txt +++ b/doc/xml.txt @@ -15,7 +15,6 @@ The py lib strives to offer enough funct itself and especially its API in html or xml. .. _xist: http://www.livinglogic.de/Python/xist/index.html -.. 
_`exchange data`: execnet.html#exchange-data a pythonic object model , please ================================ --- a/testing/pytest/looponfail/test_remote.py +++ b/testing/pytest/looponfail/test_remote.py @@ -1,4 +1,5 @@ import py +py.test.importorskip("execnet") from py.__.test.looponfail.remote import LooponfailingSession, LoopState, RemoteControl class TestRemoteControl: --- a/testing/pytest/test_parseopt.py +++ b/testing/pytest/test_parseopt.py @@ -8,6 +8,12 @@ class TestParser: out, err = capsys.readouterr() assert out.find("xyz") != -1 + def test_epilog(self): + parser = parseopt.Parser() + assert not parser.epilog + parser.epilog += "hello" + assert parser.epilog == "hello" + def test_group_add_and_get(self): parser = parseopt.Parser() group = parser.addgroup("hello", description="desc") @@ -70,6 +76,15 @@ class TestParser: args = parser.parse_setoption([], option) assert option.hello == "x" + def test_parser_epilog(self, testdir): + testdir.makeconftest(""" + def pytest_addoption(parser): + parser.epilog = "hello world" + """) + result = testdir.runpytest('--help') + #assert result.ret != 0 + assert result.stdout.fnmatch_lines(["*hello world*"]) + def test_parse_setoption(self): parser = parseopt.Parser() parser.addoption("--hello", dest="hello", action="store") --- a/py/test/plugin/pytest_default.py +++ b/py/test/plugin/pytest_default.py @@ -3,6 +3,11 @@ import sys import py +try: + import execnet +except ImportError: + execnet = None + def pytest_pyfunc_call(__multicall__, pyfuncitem): if not __multicall__.execute(): testfunction = pyfuncitem.obj @@ -63,14 +68,22 @@ def pytest_addoption(parser): help="traceback verboseness (long/short/no).") group._addoption('-p', action="append", dest="plugins", default = [], help=("load the specified plugin after command line parsing. 
")) - group._addoption('-f', '--looponfail', - action="store_true", dest="looponfail", default=False, - help="run tests, re-run failing test set until all pass.") + if execnet: + group._addoption('-f', '--looponfail', + action="store_true", dest="looponfail", default=False, + help="run tests, re-run failing test set until all pass.") group = parser.addgroup("debugconfig", "test process debugging and configuration") group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir", help="base temporary directory for this test run.") + if execnet: + add_dist_options(parser) + else: + parser.epilog = ( + "execnet missing: --looponfailing and distributed testing not available.") + +def add_dist_options(parser): group = parser.addgroup("dist", "distributed testing") # see http://pytest.org/help/dist") group._addoption('--dist', metavar="distmode", action="store", choices=['load', 'each', 'no'], @@ -97,18 +110,19 @@ def pytest_configure(config): setsession(config) def fixoptions(config): - if config.option.numprocesses: - config.option.dist = "load" - config.option.tx = ['popen'] * int(config.option.numprocesses) - if config.option.distload: - config.option.dist = "load" + if execnet: + if config.option.numprocesses: + config.option.dist = "load" + config.option.tx = ['popen'] * int(config.option.numprocesses) + if config.option.distload: + config.option.dist = "load" def setsession(config): val = config.getvalue if val("collectonly"): from py.__.test.session import Session config.setsessionclass(Session) - else: + elif execnet: if val("looponfail"): from py.__.test.looponfail.remote import LooponfailingSession config.setsessionclass(LooponfailingSession) --- a/py/test/plugin/pytest_pdb.py +++ b/py/test/plugin/pytest_pdb.py @@ -4,6 +4,10 @@ interactive debugging with the Python De import py import pdb, sys, linecache from py.__.test.outcome import Skipped +try: + import execnet +except ImportError: + execnet = None def pytest_addoption(parser): group = parser.getgroup("general") @@ -14,10 +18,11 @@ def pytest_addoption(parser): def pytest_configure(config): if config.option.usepdb: - if config.getvalue("looponfail"): - raise config.Error("--pdb incompatible with --looponfail.") - if config.option.dist != "no": - raise config.Error("--pdb incompatible with distributing tests.") + if execnet: + if config.getvalue("looponfail"): + raise config.Error("--pdb incompatible with --looponfail.") + if config.option.dist != "no": + raise config.Error("--pdb incompatible with distributing tests.") config.pluginmanager.register(PdbInvoke()) class PdbInvoke: --- a/testing/pytest/plugin/test_pytest_default.py +++ b/testing/pytest/plugin/test_pytest_default.py @@ -10,6 +10,7 @@ def test_implied_different_sessions(tmpd return Exception return getattr(config._sessionclass, '__name__', None) assert x() == None + py.test.importorskip("execnet") assert x('-d') == 'DSession' assert x('--dist=each') == 'DSession' assert x('-n3') == 'DSession' @@ -31,6 +32,8 @@ def test_plugin_already_exists(testdir): class TestDistOptions: + def setup_method(self, method): + py.test.importorskip("execnet") def test_getxspecs(self, testdir): config = testdir.parseconfigure("--tx=popen", "--tx", "ssh=xyz") xspecs = config.getxspecs() @@ -64,13 +67,13 @@ class TestDistOptions: assert py.path.local('z') in roots assert testdir.tmpdir.join('x') in roots -def test_dist_options(testdir): - config = testdir.parseconfigure("-n 2") - assert config.option.dist == "load" - assert config.option.tx == ['popen'] * 2 - - config = 
testdir.parseconfigure("-d") - assert config.option.dist == "load" + def test_dist_options(self, testdir): + config = testdir.parseconfigure("-n 2") + assert config.option.dist == "load" + assert config.option.tx == ['popen'] * 2 + + config = testdir.parseconfigure("-d") + assert config.option.dist == "load" def test_pytest_report_iteminfo(): class FakeItem(object): --- a/py/test/plugin/pytest_terminal.py +++ b/py/test/plugin/pytest_terminal.py @@ -165,7 +165,7 @@ class TerminalReporter: self.stats.setdefault('deselected', []).append(items) def pytest_itemstart(self, item, node=None): - if self.config.option.dist != "no": + if getattr(self.config.option, 'dist', 'no') != "no": # for dist-testing situations itemstart means we # queued the item for sending, not interesting (unless debugging) if self.config.option.debug: From commits-noreply at bitbucket.org Mon Oct 5 02:10:17 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 5 Oct 2009 00:10:17 +0000 (UTC) Subject: [py-svn] apipkg commit 78f6f2825345: better examples Message-ID: <20091005001017.BD3AC9A7A1@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User holger krekel # Date 1254701406 -7200 # Node ID 78f6f28253457f50ac06a429cde0d88234c0e0a8 # Parent cfef4422c9219b146af23eb8dff4b8b89169a8f9 better examples --- a/readme.txt +++ b/readme.txt @@ -25,10 +25,16 @@ and exports two objects imported from di } The package is initialized with a dictionary as namespace. + +You need to create a ``_mypkg`` package with a ``somemodule.py`` +and ``othermodule.py`` containing the respective classes. +The ``_mypkg`` is not special - it's a completely +regular python package. + Namespace dictionaries contain ``name: value`` mappings where the value may be another namespace dictionary or a string specifying an import location. On accessing -the according attribute an import will be performed:: +an namespace attribute an import will be performed:: >>> import mypkg >>> mypkg.path @@ -39,15 +45,22 @@ the according attribute an import will b The ``mypkg.sub`` namespace and both its classes are -lazy loaded and no imports apart from the root -``import mypkg`` is required. +lazy loaded. Note that **no imports apart from the root +'import mypkg' is required**. This means that whoever +uses your Api only ever needs this one import. Of course +you can still use the import statement like so:: + + from mypkg.sub import Class1 + Including apipkg in your package -------------------------------------- -If you don't want to add a depdency to your package you -can copy the `apipkg.py`_ somewhere to your own package, -e.g. ``_mypkg/apipkg.py`` in the above example. +If you don't want to add an ``apipkg`` dependency to your package you +can copy the `apipkg.py`_ file somewhere to your own package, +for example ``_mypkg/apipkg.py`` in the above example. You +then import the ``initpkg`` function from that new place and +are good to go. .. _`small pure python module`: .. 
_`apipkg.py`: http://bitbucket.org/hpk42/apipkg/src/tip/apipkg.py From commits-noreply at bitbucket.org Mon Oct 5 02:10:19 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 5 Oct 2009 00:10:19 +0000 (UTC) Subject: [py-svn] apipkg commit cfef4422c921: improve docs Message-ID: <20091005001019.744FE9A7A2@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User holger krekel # Date 1254700360 -7200 # Node ID cfef4422c9219b146af23eb8dff4b8b89169a8f9 # Parent f1e60132ead5c39e79d356d82ebf574de83dd04b improve docs --- a/readme.txt +++ b/readme.txt @@ -3,7 +3,7 @@ Welcome to apipkg! With apipkg you can control the exported namespace of a python package and greatly reduce the number of imports for your users. -It is a small pure python module that works on virtually all Python +It is a `small pure python module`_ that works on virtually all Python versions, including CPython2.3 to Python3.1, Jython and PyPy. It co-operates well with Python's ``help()`` system and common command line completion tools. Usage is very simple: you can require 'apipkg' as a dependency @@ -49,6 +49,7 @@ If you don't want to add a depdency to y can copy the `apipkg.py`_ somewhere to your own package, e.g. ``_mypkg/apipkg.py`` in the above example. +.. _`small pure python module`: .. _`apipkg.py`: http://bitbucket.org/hpk42/apipkg/src/tip/apipkg.py Feedback? From commits-noreply at bitbucket.org Mon Oct 5 02:22:59 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 5 Oct 2009 00:22:59 +0000 (UTC) Subject: [py-svn] py-trunk commit c31c2af5ace9: forgot to commit the verbatim copy of apipkg in _py/apipkg.py Message-ID: <20091005002259.070BE9A7A1@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1254702168 -7200 # Node ID c31c2af5ace95a067fdfd2a82129cc8f3f0d29dd # Parent c09184574eca41143f883ea1f091cb3d4b54bb7b forgot to commit the verbatim copy of apipkg in _py/apipkg.py --- /dev/null +++ b/_py/apipkg.py @@ -0,0 +1,71 @@ +""" +apipkg: control the exported namespace of a python package. + +see http://pypi.python.org/pypi/apipkg + +(c) holger krekel, 2009 - MIT license +""" +import os, sys +from types import ModuleType + +__version__ = "1.0b1" + +def initpkg(pkgname, exportdefs): + """ initialize given package from the export definitions. """ + pkgmodule = sys.modules[pkgname] + mod = ApiModule(pkgname, exportdefs) + for name, value in mod.__dict__.items(): + if name[:2] != "__" or name == "__all__": + setattr(pkgmodule, name, value) + +def importobj(importspec): + """ return object specified by importspec.""" + modpath, attrname = importspec.split(":") + module = __import__(modpath, None, None, ['__doc__']) + return getattr(module, attrname) + +class ApiModule(ModuleType): + def __init__(self, name, importspec, parent=None): + self.__name__ = name + self.__all__ = list(importspec) + self.__map__ = {} + if parent: + fullname = parent.__fullname__ + "." 
+ name + setattr(parent, name, self) + else: + fullname = name + self.__fullname__ = fullname + for name, importspec in importspec.items(): + if isinstance(importspec, dict): + apimod = ApiModule(name, importspec, parent=self) + sys.modules[apimod.__fullname__] = apimod + else: + if not importspec.count(":") == 1: + raise ValueError("invalid importspec %r" % (importspec,)) + if name == '__doc__': + self.__doc__ = importobj(importspec) + else: + self.__map__[name] = importspec + + def __repr__(self): + return '' % (self.__fullname__,) + + def __getattr__(self, name): + try: + importspec = self.__map__.pop(name) + except KeyError: + raise AttributeError(name) + else: + result = importobj(importspec) + setattr(self, name, result) + return result + + def __dict__(self): + # force all the content of the module to be loaded when __dict__ is read + dictdescr = ModuleType.__dict__['__dict__'] + dict = dictdescr.__get__(self) + if dict is not None: + for name in self.__all__: + hasattr(self, name) # force attribute load, ignore errors + return dict + __dict__ = property(__dict__) From commits-noreply at bitbucket.org Fri Oct 9 15:27:08 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Fri, 9 Oct 2009 13:27:08 +0000 (UTC) Subject: [py-svn] py-trunk commit 6717b729d59a: ignore more dirs and files Message-ID: <20091009132708.E77B58386F@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1255001911 -7200 # Node ID 6717b729d59a8dabd18a0d922bc2102a2565c1aa # Parent c31c2af5ace95a067fdfd2a82129cc8f3f0d29dd ignore more dirs and files --- a/.hgignore +++ b/.hgignore @@ -12,6 +12,8 @@ syntax:glob *.swp *.html *.class +*.orig build/ +dist/ py.egg-info From commits-noreply at bitbucket.org Fri Oct 9 15:27:10 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Fri, 9 Oct 2009 13:27:10 +0000 (UTC) Subject: [py-svn] py-trunk commit bb70d1a4e812: resolves #59 - robustify unittest collection Message-ID: <20091009132710.A797883870@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1255093766 -7200 # Node ID bb70d1a4e812d729bf65bf19e56c575fe5d24b20 # Parent 6717b729d59a8dabd18a0d922bc2102a2565c1aa resolves #59 - robustify unittest collection --- a/doc/changelog.txt +++ b/doc/changelog.txt @@ -1,6 +1,8 @@ Changes between 1.0.2 and '1.1.0b1' ===================================== +* fix issue #59 - robustify unittest test collection + * make bpython/help interaction work by adding an __all__ attribute to ApiModule, cleanup initpkg --- a/_py/test/plugin/pytest_unittest.py +++ b/_py/test/plugin/pytest_unittest.py @@ -19,8 +19,13 @@ import sys def pytest_pycollect_makeitem(collector, name, obj): if 'unittest' not in sys.modules: return # nobody could have possibly derived a subclass - if py.std.inspect.isclass(obj) and issubclass(obj, py.std.unittest.TestCase): - return UnitTestCase(name, parent=collector) + try: + isunit = issubclass(obj, py.std.unittest.TestCase) + except TypeError: + pass + else: + if isunit: + return UnitTestCase(name, parent=collector) class UnitTestCase(py.test.collect.Class): def collect(self): --- a/testing/pytest/plugin/test_pytest_unittest.py +++ b/testing/pytest/plugin/test_pytest_unittest.py @@ -1,6 +1,5 @@ import py - def test_simple_unittest(testdir): testpath = testdir.makepyfile(""" import unittest @@ -15,6 +14,17 @@ def 
test_simple_unittest(testdir): assert reprec.matchreport("testpassing").passed assert reprec.matchreport("test_failing").failed +def test_isclasscheck_issue53(testdir): + testpath = testdir.makepyfile(""" + import unittest + class _E(object): + def __getattr__(self, tag): + pass + E = _E() + """) + result = testdir.runpytest(testpath) + assert result.ret == 0 + def test_setup(testdir): testpath = testdir.makepyfile(test_two=""" import unittest From commits-noreply at bitbucket.org Fri Oct 9 15:27:12 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Fri, 9 Oct 2009 13:27:12 +0000 (UTC) Subject: [py-svn] py-trunk commit 60b5a4f2ca9a: fix some tests after the py/_py split Message-ID: <20091009132712.650A083871@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1255094806 -7200 # Node ID 60b5a4f2ca9ad6894347afd4c449a1d16f66f523 # Parent bb70d1a4e812d729bf65bf19e56c575fe5d24b20 fix some tests after the py/_py split --- a/testing/pytest/plugin/test_pytest_default.py +++ b/testing/pytest/plugin/test_pytest_default.py @@ -50,7 +50,7 @@ class TestDistOptions: def test_getrsyncdirs(self, testdir): config = testdir.parseconfigure('--rsyncdir=' + str(testdir.tmpdir)) roots = config.getrsyncdirs() - assert len(roots) == 1 + 1 + assert len(roots) == 1 + 2 assert testdir.tmpdir in roots def test_getrsyncdirs_with_conftest(self, testdir): @@ -62,7 +62,7 @@ class TestDistOptions: """) config = testdir.parseconfigure(testdir.tmpdir, '--rsyncdir=y', '--rsyncdir=z') roots = config.getrsyncdirs() - assert len(roots) == 3 + 1 + assert len(roots) == 3 + 2 assert py.path.local('y') in roots assert py.path.local('z') in roots assert testdir.tmpdir.join('x') in roots From commits-noreply at bitbucket.org Mon Oct 12 16:08:19 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 12 Oct 2009 14:08:19 +0000 (UTC) Subject: [py-svn] py-trunk commit 16b15d5f3c9e: adding the console-runtest helper as discussed on py-dev Message-ID: <20091012140819.550BF7EE86@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1255339727 -7200 # Node ID 16b15d5f3c9ef9ea02c37d86f1f961d49d6e762f # Parent eefec6c36925a832818b091e742705939557d505 adding the console-runtest helper as discussed on py-dev --- /dev/null +++ b/contrib/runtesthelper.py @@ -0,0 +1,19 @@ +""" +this little helper allows to run tests multiple times +in the same process. useful for running tests from +a console. 
+""" +import py, sys + +def pytest(argv=None): + if argv is None: + argv = [] + try: + sys.argv[1:] = argv + py.cmdline.pytest() + except SystemExit: + pass + # we need to reset the global py.test.config object + py._com.comregistry = py._com.comregistry.__class__([]) + py.test.config = py.test.config.__class__( + pluginmanager=py.test._PluginManager(py._com.comregistry)) From commits-noreply at bitbucket.org Mon Oct 12 16:08:21 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 12 Oct 2009 14:08:21 +0000 (UTC) Subject: [py-svn] py-trunk commit eefec6c36925: introduce "-d" to py.cleanup Message-ID: <20091012140821.1D3837EEEF@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1255339481 -7200 # Node ID eefec6c36925a832818b091e742705939557d505 # Parent 60b5a4f2ca9ad6894347afd4c449a1d16f66f523 introduce "-d" to py.cleanup --- a/_py/cmdline/pycleanup.py +++ b/_py/cmdline/pycleanup.py @@ -18,6 +18,8 @@ def main(): action="store_true", help="display would-be-removed filenames" ) + parser.add_option("-d", action="store_true", dest="removedir", + help="remove empty directories") (options, args) = parser.parse_args() if not args: args = ["."] @@ -29,8 +31,16 @@ def main(): path = py.path.local(arg) py.builtin.print_("cleaning path", path, "of extensions", ext) for x in path.visit(shouldremove, lambda x: x.check(dotfile=0, link=0)): - if options.dryrun: - py.builtin.print_("would remove", x) - else: - py.builtin.print_("removing", x) - x.remove() + remove(x, options) + if options.removedir: + for x in path.visit(lambda x: x.check(dir=1), + lambda x: x.check(dotfile=0, link=0)): + remove(x, options) + +def remove(path, options): + if options.dryrun: + py.builtin.print_("would remove", path) + else: + py.builtin.print_("removing", path) + path.remove() + --- a/testing/cmdline/test_cmdline.py +++ b/testing/cmdline/test_cmdline.py @@ -26,3 +26,24 @@ class TestPyLookup: result.stdout.fnmatch_lines( ["%s:1: stuff = x" % (searched.basename,)] ) + +class TestPyCleanup: + def test_basic(self, testdir, tmpdir): + p = tmpdir.ensure("hello.py") + result = testdir.runpybin("py.cleanup", tmpdir) + assert result.ret == 0 + assert p.check() + pyc = p.new(ext='pyc') + pyc.ensure() + result = testdir.runpybin("py.cleanup", tmpdir) + assert not pyc.check() + + def test_dir_remove(self, testdir, tmpdir): + p = tmpdir.mkdir("a") + result = testdir.runpybin("py.cleanup", tmpdir) + assert result.ret == 0 + assert p.check() + result = testdir.runpybin("py.cleanup", tmpdir, '-d') + assert result.ret == 0 + assert not p.check() + --- a/doc/changelog.txt +++ b/doc/changelog.txt @@ -1,6 +1,8 @@ Changes between 1.0.2 and '1.1.0b1' ===================================== +* introduce and test "py.cleanup -d" to remove empty directories + * fix issue #59 - robustify unittest test collection * make bpython/help interaction work by adding an __all__ attribute From commits-noreply at bitbucket.org Thu Oct 15 16:24:36 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 15 Oct 2009 14:24:36 +0000 (UTC) Subject: [py-svn] py-trunk commit 6d27121e78c2: resolve issue 54 Message-ID: <20091015142436.92A0E7EF12@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1255557241 -7200 # Node ID 6d27121e78c29ae55573e4c8a152f4489a02dae3 # Parent 
16b15d5f3c9ef9ea02c37d86f1f961d49d6e762f resolve issue 54 triggered by @haypo's issue and patch the process.cmdexec function now always uses subprocess under the hood. Also fixed some 3k related encoding issues. --- a/testing/process/test_cmdexec.py +++ b/testing/process/test_cmdexec.py @@ -18,6 +18,8 @@ class Test_exec_cmd: except cmdexec.Error: e = exvalue() assert e.status == 1 + assert py.builtin._istext(e.out) + assert py.builtin._istext(e.err) def test_err(self): try: @@ -28,16 +30,3 @@ class Test_exec_cmd: assert hasattr(e, 'err') assert hasattr(e, 'out') assert e.err or e.out - -def test_cmdexec_selection(): - from _py.process import cmdexec - if py.std.sys.platform == "win32": - assert py.process.cmdexec == cmdexec.win32_exec_cmd - elif hasattr(py.std.sys, 'pypy') or hasattr(py.std.sys, 'pypy_objspaceclass'): - assert py.process.cmdexec == cmdexec.popen3_exec_cmd - else: - assert py.process.cmdexec == cmdexec.posix_exec_cmd - - - - --- a/_py/process/cmdexec.py +++ b/_py/process/cmdexec.py @@ -1,151 +1,28 @@ """ -module defining basic hook for executing commands -in a - as much as possible - platform independent way. - -Current list: - - exec_cmd(cmd) executes the given command and returns output - or ExecutionFailed exception (if exit status!=0) - """ import os, sys +import subprocess import py from subprocess import Popen, PIPE -#----------------------------------------------------------- -# posix external command execution -#----------------------------------------------------------- -def posix_exec_cmd(cmd): - """ return output of executing 'cmd'. +def cmdexec(cmd): + """ return output of executing 'cmd' in a separate process. - raise ExecutionFailed exeception if the command failed. + raise cmdexec.ExecutionFailed exeception if the command failed. the exception will provide an 'err' attribute containing the error-output from the command. """ - #__tracebackhide__ = True - - import errno - - child = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, close_fds=True) - stdin, stdout, stderr = child.stdin, child.stdout, child.stderr - - # XXX sometimes we get a blocked r.read() call (see below) - # although select told us there is something to read. - # only the next three lines appear to prevent - # the read call from blocking infinitely. 
- import fcntl - def set_non_block(fd): - flags = fcntl.fcntl(fd, fcntl.F_GETFL) - flags = flags | os.O_NONBLOCK - fcntl.fcntl(fd, fcntl.F_SETFL, flags) - set_non_block(stdout.fileno()) - set_non_block(stderr.fileno()) - #fcntl.fcntl(stdout, fcntl.F_SETFL, os.O_NONBLOCK) - #fcntl.fcntl(stderr, fcntl.F_SETFL, os.O_NONBLOCK) - - import select - out, err = [], [] - while 1: - r_list = [x for x in [stdout, stderr] if x and not x.closed] - if not r_list: - break - try: - r_list = select.select(r_list, [], [])[0] - except (select.error, IOError): - se = sys.exc_info()[1] - if se.args[0] == errno.EINTR: - continue - else: - raise - for r in r_list: - try: - data = r.read() # XXX see XXX above - except IOError: - io = sys.exc_info()[1] - if io.args[0] == errno.EAGAIN: - continue - # Connection Lost - raise - except OSError: - ose = sys.exc_info()[1] - if ose.errno == errno.EPIPE: - # Connection Lost - raise - if ose.errno == errno.EAGAIN: # MacOS-X does this - continue - raise - - if not data: - r.close() - continue - if r is stdout: - out.append(data) - else: - err.append(data) - pid, systemstatus = os.waitpid(child.pid, 0) - if pid != child.pid: - raise ExecutionFailed("child process disappeared during: "+ cmd) - if systemstatus: - if os.WIFSIGNALED(systemstatus): - status = os.WTERMSIG(systemstatus) + 128 - else: - status = os.WEXITSTATUS(systemstatus) - raise ExecutionFailed(status, systemstatus, cmd, - joiner(out), joiner(err)) - return joiner(out) - -def joiner(out): - encoding = sys.getdefaultencoding() - return "".join([py.builtin._totext(x, encoding) for x in out]) - -#----------------------------------------------------------- -# simple win32 external command execution -#----------------------------------------------------------- -def win32_exec_cmd(cmd): - """ return output of executing 'cmd'. - - raise ExecutionFailed exeception if the command failed. - the exception will provide an 'err' attribute containing - the error-output from the command. - - Note that this method can currently deadlock because - we don't have WaitForMultipleObjects in the std-python api. - - Further note that the rules for quoting are very special - under Windows. Do a HELP CMD in a shell, and tell me if - you understand this. For now, I try to do a fix. 
- """ - #print "*****", cmd - - # the following quoting is only valid for CMD.EXE, not COMMAND.COM - cmd_quoting = True - try: - if os.environ['COMSPEC'].upper().endswith('COMMAND.COM'): - cmd_quoting = False - except KeyError: - pass - if cmd_quoting: - if '"' in cmd and not cmd.startswith('""'): - cmd = '"%s"' % cmd - - return popen3_exec_cmd(cmd) - -def popen3_exec_cmd(cmd): - stdin, stdout, stderr = os.popen3(cmd) - out = stdout.read() - err = stderr.read() - stdout.close() - stderr.close() - status = stdin.close() + process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = process.communicate() + out = py.builtin._totext(out, sys.getdefaultencoding()) + err = py.builtin._totext(err, sys.getdefaultencoding()) + status = process.poll() if status: raise ExecutionFailed(status, status, cmd, out, err) return out -def pypy_exec_cmd(cmd): - return popen3_exec_cmd(cmd) - class ExecutionFailed(py.error.Error): def __init__(self, status, systemstatus, cmd, out, err): Exception.__init__(self) @@ -157,16 +34,6 @@ class ExecutionFailed(py.error.Error): def __str__(self): return "ExecutionFailed: %d %s\n%s" %(self.status, self.cmd, self.err) -# -# choose correct platform-version -# - -if sys.platform == 'win32': - cmdexec = win32_exec_cmd -elif hasattr(sys, 'pypy') or hasattr(sys, 'pypy_objspaceclass'): - cmdexec = popen3_exec_cmd -else: - cmdexec = posix_exec_cmd # export the exception under the name 'py.process.cmdexec.Error' cmdexec.Error = ExecutionFailed From commits-noreply at bitbucket.org Thu Oct 15 16:24:38 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 15 Oct 2009 14:24:38 +0000 (UTC) Subject: [py-svn] py-trunk commit 94601d28b6dd: generalize skipping Message-ID: <20091015142438.7FE847EF15@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1255616337 -7200 # Node ID 94601d28b6dd85f3afe5bde05d7850c6ae1ce34a # Parent 6d27121e78c29ae55573e4c8a152f4489a02dae3 generalize skipping - rename pytest_xfail to pytest_skip - dynamic "skipif" and "xfail" decorators - move most skipping code to the plugin also coming with this commit: - extend mark keyword to accept positional args + docs - fix a few documentation related issues - leave version as "trunk" for now --- a/testing/pytest/plugin/test_pytest_runner.py +++ b/testing/pytest/plugin/test_pytest_runner.py @@ -27,6 +27,12 @@ class TestSetupState: ss.teardown_all() assert not l + def test_teardown_exact_stack_empty(self, testdir): + item = testdir.getitem("def test_func(): pass") + ss = runner.SetupState() + ss.teardown_exact(item) + ss.teardown_exact(item) + ss.teardown_exact(item) class BaseFunctionalTests: def test_passfunction(self, testdir): --- a/testing/pytest/test_outcome.py +++ b/testing/pytest/test_outcome.py @@ -15,26 +15,6 @@ class TestRaises: def test_raises_function(self): py.test.raises(ValueError, int, 'hello') -def test_importorskip(): - from _py.test.outcome import Skipped - try: - sys = py.test.importorskip("sys") - assert sys == py.std.sys - #path = py.test.importorskip("os.path") - #assert path == py.std.os.path - py.test.raises(Skipped, "py.test.importorskip('alskdj')") - py.test.raises(SyntaxError, "py.test.importorskip('x y z')") - py.test.raises(SyntaxError, "py.test.importorskip('x=y')") - path = py.test.importorskip("py", minversion=".".join(py.__version__)) - mod = py.std.types.ModuleType("hello123") - mod.__version__ = "1.3" - 
py.test.raises(Skipped, """ - py.test.importorskip("hello123", minversion="5.0") - """) - except Skipped: - print(py.code.ExceptionInfo()) - py.test.fail("spurious skip") - def test_pytest_exit(): try: py.test.exit("hello") --- a/doc/test/funcargs.txt +++ b/doc/test/funcargs.txt @@ -276,6 +276,7 @@ methods in a convenient way. .. _`conftest plugin`: customize.html#conftestplugin .. _`funcarg factory`: +.. _factory: funcarg factories: setting up test function arguments ============================================================== --- a/testing/pytest/conftest.py +++ b/testing/pytest/conftest.py @@ -1,3 +1,3 @@ -pytest_plugins = "pytest_xfail", "pytest_pytester", "pytest_tmpdir" +pytest_plugins = "skipping", "pytester", "tmpdir" --- a/testing/pytest/plugin/test_pytest_xfail.py +++ /dev/null @@ -1,21 +0,0 @@ - -def test_xfail(testdir): - p = testdir.makepyfile(test_one=""" - import py - @py.test.mark.xfail - def test_this(): - assert 0 - - @py.test.mark.xfail - def test_that(): - assert 1 - """) - result = testdir.runpytest(p) - extra = result.stdout.fnmatch_lines([ - "*expected failures*", - "*test_one.test_this*test_one.py:4*", - "*UNEXPECTEDLY PASSING*", - "*test_that*", - ]) - assert result.ret == 1 - --- a/doc/test/features.txt +++ b/doc/test/features.txt @@ -125,22 +125,11 @@ a PDB `Python debugger`_ when a test fai advanced skipping of tests ------------------------------- -If you want to skip tests you can use ``py.test.skip`` within -test or setup functions. Example:: +py.test has builtin support for skipping tests or expecting +failures on tests on certain platforms. Apart from the +minimal py.test style also unittest- and nose-style tests +can make use of this feature. - def test_hello(): - if sys.platform != "win32": - py.test.skip("only win32 supported") - -You can also use a helper to skip on a failing import:: - - docutils = py.test.importorskip("docutils") - -or to skip if a library does not have the right version:: - - docutils = py.test.importorskip("docutils", minversion="0.3") - -The version will be read from the specified module's ``__version__`` attribute. .. _`funcargs mechanism`: funcargs.html .. 
_`unittest.py`: http://docs.python.org/library/unittest.html --- a/testing/pytest/test_parseopt.py +++ b/testing/pytest/test_parseopt.py @@ -10,7 +10,7 @@ class TestParser: def test_epilog(self): parser = parseopt.Parser() - assert not parser.epilog + assert not parser.epilog parser.epilog += "hello" assert parser.epilog == "hello" @@ -76,15 +76,6 @@ class TestParser: args = parser.parse_setoption([], option) assert option.hello == "x" - def test_parser_epilog(self, testdir): - testdir.makeconftest(""" - def pytest_addoption(parser): - parser.epilog = "hello world" - """) - result = testdir.runpytest('--help') - #assert result.ret != 0 - assert result.stdout.fnmatch_lines(["*hello world*"]) - def test_parse_setoption(self): parser = parseopt.Parser() parser.addoption("--hello", dest="hello", action="store") @@ -109,3 +100,14 @@ class TestParser: option, args = parser.parse([]) assert option.hello == "world" assert option.this == 42 + + at py.test.mark.skipif("sys.version_info < (2,5)") +def test_addoption_parser_epilog(testdir): + testdir.makeconftest(""" + def pytest_addoption(parser): + parser.epilog = "hello world" + """) + result = testdir.runpytest('--help') + #assert result.ret != 0 + assert result.stdout.fnmatch_lines(["*hello world*"]) + --- a/doc/test/plugin/xfail.txt +++ /dev/null @@ -1,34 +0,0 @@ - -pytest_xfail plugin -=================== - -mark python test functions as expected-to-fail and report them separately. - -.. contents:: - :local: - -usage ------------- - -Use the generic mark decorator to mark your test functions as -'expected to fail':: - - @py.test.mark.xfail - def test_hello(): - ... - -This test will be executed but no traceback will be reported -when it fails. Instead terminal reporting will list it in the -"expected to fail" section or "unexpectedly passing" section. - -Start improving this plugin in 30 seconds -========================================= - - -1. Download `pytest_xfail.py`_ plugin source code -2. put it somewhere as ``pytest_xfail.py`` into your import path -3. a subsequent ``py.test`` run will use your local version - -Checkout customize_, other plugins_ or `get in contact`_. - -.. include:: links.txt --- a/_py/test/outcome.py +++ b/_py/test/outcome.py @@ -56,25 +56,6 @@ def skip(msg=""): __tracebackhide__ = True raise Skipped(msg=msg) -def importorskip(modname, minversion=None): - """ return imported module or skip() """ - compile(modname, '', 'eval') # to catch syntaxerrors - try: - mod = __import__(modname) - except ImportError: - py.test.skip("could not import %r" %(modname,)) - if minversion is None: - return mod - verattr = getattr(mod, '__version__', None) - if isinstance(minversion, str): - minver = minversion.split(".") - else: - minver = list(minversion) - if verattr is None or verattr.split(".") < minver: - py.test.skip("module %r has __version__ %r, required is: %r" %( - modname, verattr, minversion)) - return mod - def fail(msg="unknown failure"): """ fail with the given Message. 
""" __tracebackhide__ = True --- a/_py/test/plugin/pytest_runner.py +++ b/_py/test/plugin/pytest_runner.py @@ -276,7 +276,7 @@ class SetupState(object): assert not self._finalizers def teardown_exact(self, item): - if item == self.stack[-1]: + if self.stack and item == self.stack[-1]: self._pop_and_teardown() else: self._callfinalizers(item) --- /dev/null +++ b/testing/pytest/plugin/test_pytest_skipping.py @@ -0,0 +1,109 @@ +import py + +def test_xfail_decorator(testdir): + p = testdir.makepyfile(test_one=""" + import py + @py.test.mark.xfail + def test_this(): + assert 0 + + @py.test.mark.xfail + def test_that(): + assert 1 + """) + result = testdir.runpytest(p) + extra = result.stdout.fnmatch_lines([ + "*expected failures*", + "*test_one.test_this*test_one.py:4*", + "*UNEXPECTEDLY PASSING*", + "*test_that*", + "*1 xfailed*" + ]) + assert result.ret == 1 + +def test_skipif_decorator(testdir): + p = testdir.makepyfile(""" + import py + @py.test.mark.skipif("hasattr(sys, 'platform')") + def test_that(): + assert 0 + """) + result = testdir.runpytest(p) + extra = result.stdout.fnmatch_lines([ + "*Skipped*platform*", + "*1 skipped*" + ]) + assert result.ret == 0 + +def test_skipif_class(testdir): + p = testdir.makepyfile(""" + import py + class TestClass: + skipif = "True" + def test_that(self): + assert 0 + def test_though(self): + assert 0 + """) + result = testdir.runpytest(p) + extra = result.stdout.fnmatch_lines([ + "*2 skipped*" + ]) + +def test_getexpression(testdir): + from _py.test.plugin.pytest_skipping import getexpression + l = testdir.getitems(""" + import py + mod = 5 + class TestClass: + cls = 4 + @py.test.mark.func(3) + def test_func(self): + pass + @py.test.mark.just + def test_other(self): + pass + """) + item, item2 = l + assert getexpression(item, 'xyz') is None + assert getexpression(item, 'func') == 3 + assert getexpression(item, 'cls') == 4 + assert getexpression(item, 'mod') == 5 + + assert getexpression(item2, 'just') + +def test_evalexpression_cls_config_example(testdir): + from _py.test.plugin.pytest_skipping import evalexpression + item, = testdir.getitems(""" + class TestClass: + skipif = "config._hackxyz" + def test_func(self): + pass + """) + item.config._hackxyz = 3 + x, y = evalexpression(item, 'skipif') + assert x == 'config._hackxyz' + assert y == 3 + +def test_importorskip(): + from _py.test.outcome import Skipped + from _py.test.plugin.pytest_skipping import importorskip + assert importorskip == py.test.importorskip + try: + sys = importorskip("sys") + assert sys == py.std.sys + #path = py.test.importorskip("os.path") + #assert path == py.std.os.path + py.test.raises(Skipped, "py.test.importorskip('alskdj')") + py.test.raises(SyntaxError, "py.test.importorskip('x y z')") + py.test.raises(SyntaxError, "py.test.importorskip('x=y')") + path = importorskip("py", minversion=".".join(py.__version__)) + mod = py.std.types.ModuleType("hello123") + mod.__version__ = "1.3" + py.test.raises(Skipped, """ + py.test.importorskip("hello123", minversion="5.0") + """) + except Skipped: + print(py.code.ExceptionInfo()) + py.test.fail("spurious skip") + --- a/doc/test/plugin/keyword.txt +++ b/doc/test/plugin/keyword.txt @@ -14,22 +14,29 @@ By default, all filename parts and class function are put into the set of keywords for a given test. You can specify additional kewords like this:: - @py.test.mark.webtest + @py.test.mark.webtest def test_send_http(): ... -This will set an attribute 'webtest' on the given test function -and by default all such attributes signal keywords. 
You can -also set values in this attribute which you could read from -a hook in order to do something special with respect to -the test function:: +This will set an attribute 'webtest' to True on the given test function. +You can read the value 'webtest' from the functions __dict__ later. - @py.test.mark.timeout(seconds=5) +You can also set values for an attribute which are put on an empty +dummy object:: + + @py.test.mark.webtest(firefox=30) def test_receive(): ... -This will set the "timeout" attribute with a Marker object -that has a 'seconds' attribute. +after which ``test_receive.webtest.firefox == 30`` holds true. + +In addition to keyword arguments you can also use positional arguments:: + + @py.test.mark.webtest("triangular") + def test_receive(): + ... + +after which ``test_receive.webtest._1 == 'triangular`` hold true. Start improving this plugin in 30 seconds ========================================= --- a/_py/test/defaultconftest.py +++ b/_py/test/defaultconftest.py @@ -10,5 +10,5 @@ Generator = py.test.collect.Generator Function = py.test.collect.Function Instance = py.test.collect.Instance -pytest_plugins = "default runner capture terminal keyword xfail tmpdir monkeypatch recwarn pdb pastebin unittest helpconfig nose assertion".split() +pytest_plugins = "default runner capture terminal keyword skipping tmpdir monkeypatch recwarn pdb pastebin unittest helpconfig nose assertion".split() --- a/_py/test/plugin/pytest_keyword.py +++ b/_py/test/plugin/pytest_keyword.py @@ -8,22 +8,29 @@ By default, all filename parts and class function are put into the set of keywords for a given test. You can specify additional kewords like this:: - @py.test.mark.webtest + @py.test.mark.webtest def test_send_http(): ... -This will set an attribute 'webtest' on the given test function -and by default all such attributes signal keywords. You can -also set values in this attribute which you could read from -a hook in order to do something special with respect to -the test function:: +This will set an attribute 'webtest' to True on the given test function. +You can read the value 'webtest' from the functions __dict__ later. - @py.test.mark.timeout(seconds=5) +You can also set values for an attribute which are put on an empty +dummy object:: + + @py.test.mark.webtest(firefox=30) def test_receive(): ... -This will set the "timeout" attribute with a Marker object -that has a 'seconds' attribute. +after which ``test_receive.webtest.firefox == 30`` holds true. + +In addition to keyword arguments you can also use positional arguments:: + + @py.test.mark.webtest("triangular") + def test_receive(): + ... + +after which ``test_receive.webtest._1 == 'triangular`` hold true. 
""" import py @@ -49,20 +56,20 @@ class MarkerDecorator: return "" %(name, d) def __call__(self, *args, **kwargs): - if not args: - if hasattr(self, 'kwargs'): - raise TypeError("double mark-keywords?") - self.kwargs = kwargs.copy() - return self - else: - if not len(args) == 1 or not hasattr(args[0], '__dict__'): - raise TypeError("need exactly one function to decorate, " - "got %r" %(args,)) - func = args[0] - mh = MarkHolder(getattr(self, 'kwargs', {})) - setattr(func, self.markname, mh) - return func - + if args: + if hasattr(args[0], '__call__'): + func = args[0] + mh = MarkHolder(getattr(self, 'kwargs', {})) + setattr(func, self.markname, mh) + return func + # not a function so we memorize all args/kwargs settings + for i, arg in enumerate(args): + kwargs["_" + str(i)] = arg + if hasattr(self, 'kwargs'): + raise TypeError("double mark-keywords?") + self.kwargs = kwargs.copy() + return self + class MarkHolder: def __init__(self, kwargs): self.__dict__.update(kwargs) --- a/doc/test/plugin/links.txt +++ b/doc/test/plugin/links.txt @@ -5,22 +5,23 @@ .. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_monkeypatch.py .. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_keyword.py .. _`pastebin`: pastebin.html +.. _`skipping`: skipping.html .. _`plugins`: index.html -.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_capture.py .. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_doctest.py .. _`capture`: capture.html .. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_nose.py .. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_restdoc.py -.. _`xfail`: xfail.html +.. _`restdoc`: restdoc.html .. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_pastebin.py .. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_figleaf.py .. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_hooklog.py +.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_skipping.py .. _`checkout the py.test development version`: ../../download.html#checkout .. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_helpconfig.py .. _`oejskit`: oejskit.html .. _`doctest`: doctest.html .. _`get in contact`: ../../contact.html -.. _`pytest_xfail.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_xfail.py +.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_capture.py .. _`figleaf`: figleaf.html .. _`customize`: ../customize.html .. _`hooklog`: hooklog.html @@ -30,7 +31,6 @@ .. _`monkeypatch`: monkeypatch.html .. _`resultlog`: resultlog.html .. _`keyword`: keyword.html -.. _`restdoc`: restdoc.html .. _`django`: django.html .. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_unittest.py .. 
_`nose`: nose.html --- a/testing/path/test_local.py +++ b/testing/path/test_local.py @@ -208,7 +208,7 @@ class TestLocalPath(common.CommonFSTests assert l[2] == p3 class TestExecutionOnWindows: - disabled = py.std.sys.platform != 'win32' + skipif = "sys.platform != 'win32'" def test_sysfind(self): x = py.path.local.sysfind('cmd') @@ -216,7 +216,7 @@ class TestExecutionOnWindows: assert py.path.local.sysfind('jaksdkasldqwe') is None class TestExecution: - disabled = py.std.sys.platform == 'win32' + skipif = "sys.platform == 'win32'" def test_sysfind(self): x = py.path.local.sysfind('test') @@ -346,8 +346,7 @@ def test_homedir(): assert homedir.check(dir=1) class TestWINLocalPath: - #root = local(TestLocalPath.root) - disabled = py.std.sys.platform != 'win32' + skipif = "sys.platform != 'win32'" def test_owner_group_not_implemented(self): py.test.raises(NotImplementedError, "path1.stat().owner") @@ -396,7 +395,7 @@ class TestWINLocalPath: old.chdir() class TestPOSIXLocalPath: - disabled = py.std.sys.platform == 'win32' + skipif = "sys.platform == 'win32'" def test_samefile(self, tmpdir): assert tmpdir.samefile(tmpdir) --- a/_py/test/plugin/pytest_xfail.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -mark python test functions as expected-to-fail and report them separately. - -usage ------------- - -Use the generic mark decorator to mark your test functions as -'expected to fail':: - - @py.test.mark.xfail - def test_hello(): - ... - -This test will be executed but no traceback will be reported -when it fails. Instead terminal reporting will list it in the -"expected to fail" section or "unexpectedly passing" section. - -""" - -import py - -def pytest_runtest_makereport(__multicall__, item, call): - if call.when != "call": - return - if hasattr(item, 'obj') and py.builtin._getfuncdict(item.obj): - if 'xfail' in py.builtin._getfuncdict(item.obj): - res = __multicall__.execute() - if call.excinfo: - res.skipped = True - res.failed = res.passed = False - else: - res.skipped = res.passed = False - res.failed = True - return res - -def pytest_report_teststatus(report): - if 'xfail' in report.keywords: - if report.skipped: - return "xfailed", "x", "xfail" - elif report.failed: - return "xpassed", "P", "xpass" - -# called by the terminalreporter instance/plugin -def pytest_terminal_summary(terminalreporter): - tr = terminalreporter - xfailed = tr.stats.get("xfailed") - if xfailed: - tr.write_sep("_", "expected failures") - for rep in xfailed: - entry = rep.longrepr.reprcrash - modpath = rep.item.getmodpath(includemodule=True) - pos = "%s %s:%d: " %(modpath, entry.path, entry.lineno) - reason = rep.longrepr.reprcrash.message - i = reason.find("\n") - if i != -1: - reason = reason[:i] - tr._tw.line("%s %s" %(pos, reason)) - - xpassed = terminalreporter.stats.get("xpassed") - if xpassed: - tr.write_sep("_", "UNEXPECTEDLY PASSING TESTS") - for rep in xpassed: - fspath, lineno, modpath = rep.item.reportinfo() - pos = "%s %s:%d: unexpectedly passing" %(modpath, fspath, lineno) - tr._tw.line(pos) --- /dev/null +++ b/doc/test/plugin/skipping.txt @@ -0,0 +1,115 @@ + +pytest_skipping plugin +====================== + +mark python test functions, classes or modules for conditional + +.. contents:: + :local: + +skipping (skipif) or as expected-to-fail (xfail). Both declarations +lead to special reporting and both can be systematically associated +with functions, whole classes or modules. The difference between +the two is that 'xfail' will still execute test functions +but it will revert the outcome. 
A passing test is now +a failure and failing test is expected. All skip conditions +are reported at the end of test run through the terminal +reporter. + +.. _skipif: + +skip a test function conditionally +------------------------------------------- + +Here is an example for skipping a test function on Python3:: + + @py.test.mark.skipif("sys.version_info >= (3,0)") + def test_function(): + ... + +Conditions are specified as python expressions +and can access the ``sys`` module. They can also +access the config object and thus depend on command +line or conftest options:: + + @py.test.mark.skipif("config.getvalue('db') is None") + def test_function(...): + ... + +conditionally mark a function as "expected to fail" +------------------------------------------------------- + +You can use the ``xfail`` keyword to mark your test functions as +'expected to fail':: + + @py.test.mark.xfail + def test_hello(): + ... + +This test will be executed but no traceback will be reported +when it fails. Instead terminal reporting will list it in the +"expected to fail" or "unexpectedly passing" sections. +As with skipif_ you may selectively expect a failure +depending on platform:: + + @py.test.mark.xfail("sys.version_info >= (3,0)") + def test_function(): + ... + +skip/xfail a whole test class or module +------------------------------------------- + +Instead of marking single functions you can skip +a whole class of tests when runnign on a specific +platform:: + + class TestSomething: + skipif = "sys.platform == 'win32'" + +Or you can mark all test functions as expected +to fail for a specific test configuration:: + + xfail = "config.getvalue('db') == 'mysql'" + + +skip if a dependency cannot be imported +--------------------------------------------- + +You can use a helper to skip on a failing import:: + + docutils = py.test.importorskip("docutils") + +You can use this helper at module level or within +a test or setup function. + +You can aslo skip if a library does not have the right version:: + + docutils = py.test.importorskip("docutils", minversion="0.3") + +The version will be read from the specified module's ``__version__`` attribute. + + +dynamically skip from within a test or setup +------------------------------------------------- + +If you want to skip the execution of a test you can call +``py.test.skip()`` within a test, a setup or from a +`funcarg factory`_ function. Example:: + + def test_function(): + if not valid_config(): + py.test.skip("unsuppored configuration") + +.. _`funcarg factory`: ../funcargs.html#factory + +Start improving this plugin in 30 seconds +========================================= + + +1. Download `pytest_skipping.py`_ plugin source code +2. put it somewhere as ``pytest_skipping.py`` into your import path +3. a subsequent ``py.test`` run will use your local version + +Checkout customize_, other plugins_ or `get in contact`_. + +.. include:: links.txt --- a/doc/test/plugin/index.txt +++ b/doc/test/plugin/index.txt @@ -2,7 +2,7 @@ plugins for Python test functions ================================= -xfail_ mark python test functions as expected-to-fail and report them separately. +skipping_ mark python test functions, classes or modules for conditional figleaf_ write and report coverage data with 'figleaf'. 
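Editor's sketch: the skipping documentation added above uses ``config.getvalue('db')`` in its skipif examples without showing where such a value comes from. A minimal conftest.py sketch of that missing half (illustrative only: the ``--db`` option name and default are assumptions, relying on the ``pytest_addoption`` hook that other plugins in this changeset use)::

    # conftest.py -- illustrative sketch, not part of the commit above
    def pytest_addoption(parser):
        # register a project-specific command line option;
        # "--db" is a made-up example name
        parser.addoption("--db", action="store", default=None,
                         help="database backend to run the db tests against")

With an option like this in place, ``@py.test.mark.skipif("config.getvalue('db') is None")`` skips the marked test whenever ``--db`` was not given on the command line.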
--- a/doc/changelog.txt +++ b/doc/changelog.txt @@ -1,6 +1,11 @@ Changes between 1.0.2 and '1.1.0b1' ===================================== +* generalized skipping: a new way to mark python functions with skipif or xfail + at function, class and modules level based on platform or sys-module attributes. + +* extend py.test.mark decorator to allow for positional args + * introduce and test "py.cleanup -d" to remove empty directories * fix issue #59 - robustify unittest test collection --- a/bin-for-dist/makepluginlist.py +++ b/bin-for-dist/makepluginlist.py @@ -5,7 +5,7 @@ WIDTH = 75 plugins = [ ('plugins for Python test functions', - 'xfail figleaf monkeypatch capture recwarn',), + 'skipping figleaf monkeypatch capture recwarn',), ('plugins for other testing styles and languages', 'oejskit unittest nose django doctest restdoc'), ('plugins for generic reporting and failure logging', @@ -252,7 +252,7 @@ class PluginDoc(RestWriter): warn("missing docstring", func) def emit_options(self, plugin): - from py.__.test.parseopt import Parser + from _py.test.parseopt import Parser options = [] parser = Parser(processopt=options.append) if hasattr(plugin, 'pytest_addoption'): --- a/doc/confrest.py +++ b/doc/confrest.py @@ -1,5 +1,5 @@ import py -from py.__.rest.resthtml import convert_rest_html, strip_html_header +from _py.rest.resthtml import convert_rest_html, strip_html_header html = py.xml.html --- a/_py/test/plugin/pytest_restdoc.py +++ b/_py/test/plugin/pytest_restdoc.py @@ -175,7 +175,7 @@ class ReSTSyntaxTest(py.test.collect.Ite 'to the py package') % (text,) relpath = '/'.join(text.split('/')[1:]) if check: - pkgroot = py.__pkg__.getpath() + pkgroot = py.path.local(py._py.__file__).dirpath() abspath = pkgroot.join(relpath) assert pkgroot.join(relpath).check(), ( 'problem with linkrole :source:`%s`: ' --- a/py/__init__.py +++ b/py/__init__.py @@ -15,7 +15,7 @@ For questions please check out http://py (c) Holger Krekel and others, 2009 """ -version = "1.1.0b1" +version = "trunk" __version__ = version = version or "1.1.x" import _py.apipkg @@ -53,7 +53,6 @@ _py.apipkg.initpkg(__name__, dict( '_PluginManager' : '_py.test.pluginmanager:PluginManager', 'raises' : '_py.test.outcome:raises', 'skip' : '_py.test.outcome:skip', - 'importorskip' : '_py.test.outcome:importorskip', 'fail' : '_py.test.outcome:fail', 'exit' : '_py.test.outcome:exit', # configuration/initialization related test api --- a/testing/path/test_svnurl.py +++ b/testing/path/test_svnurl.py @@ -50,12 +50,11 @@ class TestSvnURLCommandPath(CommonSvnTes def test_svnurl_characters_tilde_end(self, path1): py.path.svnurl("http://host.com/some/file~") + @py.test.mark.xfail("sys.platform == 'win32'") def test_svnurl_characters_colon_path(self, path1): - if py.std.sys.platform == 'win32': - # colons are allowed on win32, because they're part of the drive - # part of an absolute path... however, they shouldn't be allowed in - # other parts, I think - py.test.skip('XXX fixme win32') + # colons are allowed on win32, because they're part of the drive + # part of an absolute path... 
however, they shouldn't be allowed in + # other parts, I think py.test.raises(ValueError, 'py.path.svnurl("http://host.com/foo:bar")') def test_export(self, path1, tmpdir): --- a/doc/test/plugin/hookspec.txt +++ b/doc/test/plugin/hookspec.txt @@ -139,6 +139,15 @@ hook specification sourcecode # distributed testing # ------------------------------------------------------------------------- + def pytest_gwmanage_newgateway(gateway, platinfo): + """ called on new raw gateway creation. """ + + def pytest_gwmanage_rsyncstart(source, gateways): + """ called before rsyncing a directory to remote gateways takes place. """ + + def pytest_gwmanage_rsyncfinish(source, gateways): + """ called after rsyncing a directory to remote gateways takes place. """ + def pytest_testnodeready(node): """ Test Node is ready to operate. """ --- /dev/null +++ b/_py/test/plugin/pytest_skipping.py @@ -0,0 +1,201 @@ +""" +mark python test functions, classes or modules for conditional +skipping (skipif) or as expected-to-fail (xfail). Both declarations +lead to special reporting and both can be systematically associated +with functions, whole classes or modules. The difference between +the two is that 'xfail' will still execute test functions +but it will revert the outcome. A passing test is now +a failure and failing test is expected. All skip conditions +are reported at the end of test run through the terminal +reporter. + +.. _skipif: + +skip a test function conditionally +------------------------------------------- + +Here is an example for skipping a test function on Python3:: + + @py.test.mark.skipif("sys.version_info >= (3,0)") + def test_function(): + ... + +Conditions are specified as python expressions +and can access the ``sys`` module. They can also +access the config object and thus depend on command +line or conftest options:: + + @py.test.mark.skipif("config.getvalue('db') is None") + def test_function(...): + ... + +conditionally mark a function as "expected to fail" +------------------------------------------------------- + +You can use the ``xfail`` keyword to mark your test functions as +'expected to fail':: + + @py.test.mark.xfail + def test_hello(): + ... + +This test will be executed but no traceback will be reported +when it fails. Instead terminal reporting will list it in the +"expected to fail" or "unexpectedly passing" sections. +As with skipif_ you may selectively expect a failure +depending on platform:: + + @py.test.mark.xfail("sys.version_info >= (3,0)") + def test_function(): + ... + +skip/xfail a whole test class or module +------------------------------------------- + +Instead of marking single functions you can skip +a whole class of tests when runnign on a specific +platform:: + + class TestSomething: + skipif = "sys.platform == 'win32'" + +Or you can mark all test functions as expected +to fail for a specific test configuration:: + + xfail = "config.getvalue('db') == 'mysql'" + + +skip if a dependency cannot be imported +--------------------------------------------- + +You can use a helper to skip on a failing import:: + + docutils = py.test.importorskip("docutils") + +You can use this helper at module level or within +a test or setup function. + +You can aslo skip if a library does not have the right version:: + + docutils = py.test.importorskip("docutils", minversion="0.3") + +The version will be read from the specified module's ``__version__`` attribute. 
+ + +dynamically skip from within a test or setup +------------------------------------------------- + +If you want to skip the execution of a test you can call +``py.test.skip()`` within a test, a setup or from a +`funcarg factory`_ function. Example:: + + def test_function(): + if not valid_config(): + py.test.skip("unsuppored configuration") + +.. _`funcarg factory`: ../funcargs.html#factory + +""" +# XXX not all skip-related code is contained in +# this plugin yet, some remains in outcome.py and +# the Skipped Exception is imported here and there. + + +import py + +def pytest_namespace(): + return {'importorskip': importorskip} + +def pytest_runtest_setup(item): + expr, result = evalexpression(item, 'skipif') + if result: + py.test.skip(expr) + +def pytest_runtest_makereport(__multicall__, item, call): + if call.when != "call": + return + if hasattr(item, 'obj'): + expr, result = evalexpression(item, 'xfail') + if result: + res = __multicall__.execute() + if call.excinfo: + res.skipped = True + res.failed = res.passed = False + else: + res.skipped = res.passed = False + res.failed = True + return res + +def pytest_report_teststatus(report): + if 'xfail' in report.keywords: + if report.skipped: + return "xfailed", "x", "xfail" + elif report.failed: + return "xpassed", "P", "xpass" + +# called by the terminalreporter instance/plugin +def pytest_terminal_summary(terminalreporter): + tr = terminalreporter + xfailed = tr.stats.get("xfailed") + if xfailed: + tr.write_sep("_", "expected failures") + for rep in xfailed: + entry = rep.longrepr.reprcrash + modpath = rep.item.getmodpath(includemodule=True) + pos = "%s %s:%d: " %(modpath, entry.path, entry.lineno) + reason = rep.longrepr.reprcrash.message + i = reason.find("\n") + if i != -1: + reason = reason[:i] + tr._tw.line("%s %s" %(pos, reason)) + + xpassed = terminalreporter.stats.get("xpassed") + if xpassed: + tr.write_sep("_", "UNEXPECTEDLY PASSING TESTS") + for rep in xpassed: + fspath, lineno, modpath = rep.item.reportinfo() + pos = "%s %s:%d: unexpectedly passing" %(modpath, fspath, lineno) + tr._tw.line(pos) + +def importorskip(modname, minversion=None): + """ return imported module or perform a dynamic skip() """ + compile(modname, '', 'eval') # to catch syntaxerrors + try: + mod = __import__(modname) + except ImportError: + py.test.skip("could not import %r" %(modname,)) + if minversion is None: + return mod + verattr = getattr(mod, '__version__', None) + if isinstance(minversion, str): + minver = minversion.split(".") + else: + minver = list(minversion) + if verattr is None or verattr.split(".") < minver: + py.test.skip("module %r has __version__ %r, required is: %r" %( + modname, verattr, minversion)) + return mod + +def getexpression(item, keyword): + if isinstance(item, py.test.collect.Function): + val = getattr(item.obj, keyword, None) + val = getattr(val, '_0', val) + if val is not None: + return val + cls = item.getparent(py.test.collect.Class) + if cls and hasattr(cls.obj, keyword): + return getattr(cls.obj, keyword) + mod = item.getparent(py.test.collect.Module) + return getattr(mod.obj, keyword, None) + +def evalexpression(item, keyword): + expr = getexpression(item, keyword) + result = None + if expr: + if isinstance(expr, str): + d = {'sys': py.std.sys, 'config': item.config} + result = eval(expr, d) + else: + result = expr + return expr, result + --- a/testing/pytest/plugin/test_pytest_keyword.py +++ b/testing/pytest/plugin/test_pytest_keyword.py @@ -14,12 +14,14 @@ def test_pytest_mark_api(): assert f.world.x == 3 
assert f.world.y == 4 + mark.world("hello")(f) + assert f.world._0 == "hello" + py.test.raises(TypeError, "mark.some(x=3)(f=5)") def test_mark_plugin(testdir): p = testdir.makepyfile(""" import py - pytest_plugins = "keyword" @py.test.mark.hello def test_hello(): assert hasattr(test_hello, 'hello') From commits-noreply at bitbucket.org Thu Oct 15 21:01:27 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 15 Oct 2009 19:01:27 +0000 (UTC) Subject: [py-svn] py-trunk commit be400218cdcc: - make importorskip static at py.test.importorskip because it's Message-ID: <20091015190127.A4F217EF12@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1255630206 -7200 # Node ID be400218cdccbab6c8dd57fc5dade0c2e77a56e1 # Parent 94601d28b6dd85f3afe5bde05d7850c6ae1ce34a - make importorskip static at py.test.importorskip because it's used for conditional plugin loading - fix case where xfail is defined at module/class level - fixes and improvements to docs, correct links to plugins - use new skip facilities here and there --- a/testing/pytest/plugin/test_pytest_runner.py +++ b/testing/pytest/plugin/test_pytest_runner.py @@ -218,9 +218,8 @@ class TestExecutionNonForked(BaseFunctio py.test.fail("did not raise") class TestExecutionForked(BaseFunctionalTests): + skipif = "not hasattr(os, 'fork')" def getrunner(self): - if not hasattr(py.std.os, 'fork'): - py.test.skip("no os.fork available") return runner.forked_run_report def test_suicide(self, testdir): @@ -262,10 +261,8 @@ class TestCollectionReports: assert not rep.passed assert rep.skipped - + at py.test.mark.skipif("not hasattr(os, 'fork')") def test_functional_boxed(testdir): - if not hasattr(py.std.os, 'fork'): - py.test.skip("needs os.fork") p1 = testdir.makepyfile(""" import os def test_function(): --- a/doc/test/plugin/links.txt +++ b/doc/test/plugin/links.txt @@ -1,38 +1,38 @@ .. _`helpconfig`: helpconfig.html .. _`terminal`: terminal.html -.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_recwarn.py +.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_recwarn.py .. _`unittest`: unittest.html -.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_monkeypatch.py -.. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_keyword.py +.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_monkeypatch.py +.. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_keyword.py .. _`pastebin`: pastebin.html .. _`skipping`: skipping.html .. _`plugins`: index.html -.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_doctest.py +.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_doctest.py .. _`capture`: capture.html -.. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_nose.py -.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_restdoc.py +.. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_nose.py +.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_restdoc.py .. _`restdoc`: restdoc.html -.. 
_`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_pastebin.py -.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_figleaf.py -.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_hooklog.py -.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_skipping.py +.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_pastebin.py +.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_figleaf.py +.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_hooklog.py +.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_skipping.py .. _`checkout the py.test development version`: ../../download.html#checkout -.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_helpconfig.py +.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_helpconfig.py .. _`oejskit`: oejskit.html .. _`doctest`: doctest.html .. _`get in contact`: ../../contact.html -.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_capture.py +.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_capture.py .. _`figleaf`: figleaf.html .. _`customize`: ../customize.html .. _`hooklog`: hooklog.html -.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_terminal.py +.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_terminal.py .. _`recwarn`: recwarn.html -.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_pdb.py +.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_pdb.py .. _`monkeypatch`: monkeypatch.html .. _`resultlog`: resultlog.html .. _`keyword`: keyword.html .. _`django`: django.html -.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_unittest.py +.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_unittest.py .. _`nose`: nose.html -.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_resultlog.py +.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_resultlog.py .. _`pdb`: pdb.html --- a/testing/code/test_assertion.py +++ b/testing/code/test_assertion.py @@ -135,11 +135,7 @@ def test_assert_with_brokenrepr_arg(): class TestView: def setup_class(cls): - try: - from _py.code._assertionold import View - except ImportError: - py.test.skip("requires the compile package") - cls.View = View + cls.View = py.test.importorskip("_py.code._assertionold").View def test_class_dispatch(self): ### Use a custom class hierarchy with existing instances --- a/_py/test/plugin/pytest_skipping.py +++ b/_py/test/plugin/pytest_skipping.py @@ -1,13 +1,12 @@ """ -mark python test functions, classes or modules for conditional -skipping (skipif) or as expected-to-fail (xfail). Both declarations -lead to special reporting and both can be systematically associated -with functions, whole classes or modules. The difference between -the two is that 'xfail' will still execute test functions -but it will revert the outcome. 
A passing test is now -a failure and failing test is expected. All skip conditions -are reported at the end of test run through the terminal -reporter. +advanced conditional skipping for python test functions, classes or modules. + +You can mark functions, classes or modules for for conditional +skipping (skipif) or as expected-to-fail (xfail). The difference +between the two is that 'xfail' will still execute test functions +but it will invert the outcome: a passing test becomes a failure and +a failing test is a semi-passing one. All skip conditions are +reported at the end of test run through the terminal reporter. .. _skipif: @@ -20,15 +19,18 @@ Here is an example for skipping a test f def test_function(): ... -Conditions are specified as python expressions -and can access the ``sys`` module. They can also -access the config object and thus depend on command -line or conftest options:: +The 'skipif' marker accepts an **arbitrary python expression** +as a condition. When setting up the test function the condition +is evaluated by calling ``eval(expr, namespace)``. The namespace +contains the ``sys`` and ``os`` modules as well as the +test ``config`` object. The latter allows you to skip based +on a test configuration value e.g. like this:: - @py.test.mark.skipif("config.getvalue('db') is None") + @py.test.mark.skipif("not config.getvalue('db')") def test_function(...): ... + conditionally mark a function as "expected to fail" ------------------------------------------------------- @@ -53,7 +55,7 @@ skip/xfail a whole test class or module ------------------------------------------- Instead of marking single functions you can skip -a whole class of tests when runnign on a specific +a whole class of tests when running on a specific platform:: class TestSomething: @@ -75,13 +77,12 @@ You can use a helper to skip on a failin You can use this helper at module level or within a test or setup function. -You can aslo skip if a library does not have the right version:: +You can also skip if a library does not come with a high enough version:: docutils = py.test.importorskip("docutils", minversion="0.3") The version will be read from the specified module's ``__version__`` attribute. - dynamically skip from within a test or setup ------------------------------------------------- @@ -96,16 +97,11 @@ If you want to skip the execution of a t .. _`funcarg factory`: ../funcargs.html#factory """ -# XXX not all skip-related code is contained in -# this plugin yet, some remains in outcome.py and -# the Skipped Exception is imported here and there. 
- +# XXX py.test.skip, .importorskip and the Skipped class +# should also be defined in this plugin, requires thought/changes import py -def pytest_namespace(): - return {'importorskip': importorskip} - def pytest_runtest_setup(item): expr, result = evalexpression(item, 'skipif') if result: @@ -117,14 +113,15 @@ def pytest_runtest_makereport(__multical if hasattr(item, 'obj'): expr, result = evalexpression(item, 'xfail') if result: - res = __multicall__.execute() + rep = __multicall__.execute() if call.excinfo: - res.skipped = True - res.failed = res.passed = False + rep.skipped = True + rep.failed = rep.passed = False else: - res.skipped = res.passed = False - res.failed = True - return res + rep.skipped = rep.passed = False + rep.failed = True + rep.keywords['xfail'] = True # expr + return rep def pytest_report_teststatus(report): if 'xfail' in report.keywords: @@ -157,24 +154,6 @@ def pytest_terminal_summary(terminalrepo pos = "%s %s:%d: unexpectedly passing" %(modpath, fspath, lineno) tr._tw.line(pos) -def importorskip(modname, minversion=None): - """ return imported module or perform a dynamic skip() """ - compile(modname, '', 'eval') # to catch syntaxerrors - try: - mod = __import__(modname) - except ImportError: - py.test.skip("could not import %r" %(modname,)) - if minversion is None: - return mod - verattr = getattr(mod, '__version__', None) - if isinstance(minversion, str): - minver = minversion.split(".") - else: - minver = list(minversion) - if verattr is None or verattr.split(".") < minver: - py.test.skip("module %r has __version__ %r, required is: %r" %( - modname, verattr, minversion)) - return mod def getexpression(item, keyword): if isinstance(item, py.test.collect.Function): @@ -193,7 +172,7 @@ def evalexpression(item, keyword): result = None if expr: if isinstance(expr, str): - d = {'sys': py.std.sys, 'config': item.config} + d = {'os': py.std.os, 'sys': py.std.sys, 'config': item.config} result = eval(expr, d) else: result = expr --- a/doc/test/plugin/skipping.txt +++ b/doc/test/plugin/skipping.txt @@ -2,19 +2,17 @@ pytest_skipping plugin ====================== -mark python test functions, classes or modules for conditional +advanced conditional skipping for python test functions, classes or modules. .. contents:: :local: -skipping (skipif) or as expected-to-fail (xfail). Both declarations -lead to special reporting and both can be systematically associated -with functions, whole classes or modules. The difference between -the two is that 'xfail' will still execute test functions -but it will revert the outcome. A passing test is now -a failure and failing test is expected. All skip conditions -are reported at the end of test run through the terminal -reporter. +You can mark functions, classes or modules for for conditional +skipping (skipif) or as expected-to-fail (xfail). The difference +between the two is that 'xfail' will still execute test functions +but it will invert the outcome: a passing test becomes a failure and +a failing test is a semi-passing one. All skip conditions are +reported at the end of test run through the terminal reporter. .. _skipif: @@ -27,15 +25,18 @@ Here is an example for skipping a test f def test_function(): ... -Conditions are specified as python expressions -and can access the ``sys`` module. They can also -access the config object and thus depend on command -line or conftest options:: +The 'skipif' marker accepts an **arbitrary python expression** +as a condition. 
When setting up the test function the condition +is evaluated by calling ``eval(expr, namespace)``. The namespace +contains the ``sys`` and ``os`` modules as well as the +test ``config`` object. The latter allows you to skip based +on a test configuration value e.g. like this:: - @py.test.mark.skipif("config.getvalue('db') is None") + @py.test.mark.skipif("not config.getvalue('db')") def test_function(...): ... + conditionally mark a function as "expected to fail" ------------------------------------------------------- @@ -60,7 +61,7 @@ skip/xfail a whole test class or module ------------------------------------------- Instead of marking single functions you can skip -a whole class of tests when runnign on a specific +a whole class of tests when running on a specific platform:: class TestSomething: @@ -82,13 +83,12 @@ You can use a helper to skip on a failin You can use this helper at module level or within a test or setup function. -You can aslo skip if a library does not have the right version:: +You can also skip if a library does not come with a high enough version:: docutils = py.test.importorskip("docutils", minversion="0.3") The version will be read from the specified module's ``__version__`` attribute. - dynamically skip from within a test or setup ------------------------------------------------- --- a/testing/process/test_killproc.py +++ b/testing/process/test_killproc.py @@ -13,5 +13,4 @@ def test_kill(): if sys.platform == "win32" and ret == 0: py.test.skip("XXX on win32, subprocess.Popen().wait() on a killed " "process does not yield return value != 0") - assert ret != 0 --- a/testing/io_/test_terminalwriter.py +++ b/testing/io_/test_terminalwriter.py @@ -2,13 +2,6 @@ import py import os, sys from _py.io import terminalwriter -def skip_win32(): - if sys.platform == 'win32': - py.test.skip('Not relevant on win32') - -import os -import py - def test_terminal_width_COLUMNS(monkeypatch): """ Dummy test for get_terminal_width """ @@ -82,14 +75,14 @@ class BaseTests: assert len(l) == 1 assert l[0] == "-" * 26 + " hello " + "-" * 27 + "\n" + @py.test.mark.skipif("sys.platform == 'win32'") def test__escaped(self): - skip_win32() tw = self.getwriter() text2 = tw._escaped("hello", (31)) assert text2.find("hello") != -1 + @py.test.mark.skipif("sys.platform == 'win32'") def test_markup(self): - skip_win32() tw = self.getwriter() for bold in (True, False): for color in ("red", "green"): @@ -104,9 +97,9 @@ class BaseTests: tw.line("x", bold=True) tw.write("x\n", red=True) l = self.getlines() - skip_win32() - assert len(l[0]) > 2, l - assert len(l[1]) > 2, l + if sys.platform != "win32": + assert len(l[0]) > 2, l + assert len(l[1]) > 2, l def test_attr_fullwidth(self): tw = self.getwriter() --- a/py/__init__.py +++ b/py/__init__.py @@ -53,6 +53,7 @@ _py.apipkg.initpkg(__name__, dict( '_PluginManager' : '_py.test.pluginmanager:PluginManager', 'raises' : '_py.test.outcome:raises', 'skip' : '_py.test.outcome:skip', + 'importorskip' : '_py.test.outcome:importorskip', 'fail' : '_py.test.outcome:fail', 'exit' : '_py.test.outcome:exit', # configuration/initialization related test api --- a/testing/pytest/plugin/test_pytest_skipping.py +++ b/testing/pytest/plugin/test_pytest_skipping.py @@ -21,6 +21,21 @@ def test_xfail_decorator(testdir): ]) assert result.ret == 1 +def test_xfail_at_module(testdir): + p = testdir.makepyfile(""" + xfail = 'True' + + def test_intentional_xfail(): + assert 0 + """) + result = testdir.runpytest(p) + extra = result.stdout.fnmatch_lines([ + "*expected failures*", + 
"*test_intentional_xfail*:4*", + "*1 xfailed*" + ]) + assert result.ret == 0 + def test_skipif_decorator(testdir): p = testdir.makepyfile(""" import py @@ -84,26 +99,3 @@ def test_evalexpression_cls_config_examp x, y = evalexpression(item, 'skipif') assert x == 'config._hackxyz' assert y == 3 - -def test_importorskip(): - from _py.test.outcome import Skipped - from _py.test.plugin.pytest_skipping import importorskip - assert importorskip == py.test.importorskip - try: - sys = importorskip("sys") - assert sys == py.std.sys - #path = py.test.importorskip("os.path") - #assert path == py.std.os.path - py.test.raises(Skipped, "py.test.importorskip('alskdj')") - py.test.raises(SyntaxError, "py.test.importorskip('x y z')") - py.test.raises(SyntaxError, "py.test.importorskip('x=y')") - path = importorskip("py", minversion=".".join(py.__version__)) - mod = py.std.types.ModuleType("hello123") - mod.__version__ = "1.3" - py.test.raises(Skipped, """ - py.test.importorskip("hello123", minversion="5.0") - """) - except Skipped: - print(py.code.ExceptionInfo()) - py.test.fail("spurious skip") - --- a/testing/pytest/test_outcome.py +++ b/testing/pytest/test_outcome.py @@ -29,3 +29,30 @@ def test_exception_printing_skip(): excinfo = py.code.ExceptionInfo() s = excinfo.exconly(tryshort=True) assert s.startswith("Skipped") + +def test_importorskip(): + from _py.test.outcome import Skipped, importorskip + assert importorskip == py.test.importorskip + try: + sys = importorskip("sys") + assert sys == py.std.sys + #path = py.test.importorskip("os.path") + #assert path == py.std.os.path + py.test.raises(Skipped, "py.test.importorskip('alskdj')") + py.test.raises(SyntaxError, "py.test.importorskip('x y z')") + py.test.raises(SyntaxError, "py.test.importorskip('x=y')") + path = importorskip("py", minversion=".".join(py.__version__)) + mod = py.std.types.ModuleType("hello123") + mod.__version__ = "1.3" + py.test.raises(Skipped, """ + py.test.importorskip("hello123", minversion="5.0") + """) + except Skipped: + print(py.code.ExceptionInfo()) + py.test.fail("spurious skip") + +def test_importorskip_imports_last_module_part(): + import os + ospath = py.test.importorskip("os.path") + assert os.path == ospath + --- a/_py/test/plugin/pytest_figleaf.py +++ b/_py/test/plugin/pytest_figleaf.py @@ -4,7 +4,8 @@ write and report coverage data with 'fig """ import py -figleaf = py.test.importorskip("figleaf.annotate_html") +py.test.importorskip("figleaf.annotate_html") +import figleaf def pytest_addoption(parser): group = parser.addgroup('figleaf options') --- a/doc/test/plugin/index.txt +++ b/doc/test/plugin/index.txt @@ -2,7 +2,7 @@ plugins for Python test functions ================================= -skipping_ mark python test functions, classes or modules for conditional +skipping_ advanced conditional skipping for python test functions, classes or modules. figleaf_ write and report coverage data with 'figleaf'. --- a/doc/test/features.txt +++ b/doc/test/features.txt @@ -123,14 +123,14 @@ command line. Using the `--pdb`` option a PDB `Python debugger`_ when a test fails. advanced skipping of tests -------------------------------- +====================================== -py.test has builtin support for skipping tests or expecting +py.test has `advanced support for skipping tests`_ or expecting failures on tests on certain platforms. Apart from the minimal py.test style also unittest- and nose-style tests can make use of this feature. - +.. _`advanced support for skipping tests`: plugin/skipping.html .. 
_`funcargs mechanism`: funcargs.html .. _`unittest.py`: http://docs.python.org/library/unittest.html .. _`doctest.py`: http://docs.python.org/library/doctest.html --- a/testing/process/test_forkedfunc.py +++ b/testing/process/test_forkedfunc.py @@ -1,9 +1,6 @@ import py, sys, os -def setup_module(mod): - if not hasattr(os, 'fork'): - py.test.skip("forkedfunc requires os.fork") - mod.tmpdir = py.test.ensuretemp(mod.__file__) +skipif = "not hasattr(os, 'fork')" def test_waitfinish_removes_tempdir(): ff = py.process.ForkedFunc(boxf1) @@ -56,7 +53,7 @@ def test_forkedfunc_on_fds(): def test_forkedfunc_signal(): result = py.process.ForkedFunc(boxseg).waitfinish() assert result.retval is None - if py.std.sys.version_info < (2,4): + if sys.version_info < (2,4): py.test.skip("signal detection does not work with python prior 2.4") assert result.signal == 11 --- a/bin-for-dist/makepluginlist.py +++ b/bin-for-dist/makepluginlist.py @@ -21,11 +21,13 @@ plugins = [ ] externals = { - 'oejskit': 'run javascript tests in real life browsers', - 'django': 'support for testing django applications', + 'oejskit': "run javascript tests in real life browsers", + 'django': "support for testing django applications", +# 'coverage': "support for using Ned's coverage module", +# 'xmlresult': "support for generating xml reports " +# "and CruiseControl integration", +} -} - def warn(*args): msg = " ".join(map(str, args)) print >>sys.stderr, "WARN:", msg @@ -123,7 +125,7 @@ class RestWriter: self.out.close() print "wrote", self.target del self.out - + class PluginOverview(RestWriter): def makerest(self, config): plugindir = py.path.local(py.__file__).dirpath("test", "plugin") @@ -145,7 +147,6 @@ class PluginOverview(RestWriter): self.Print() class HookSpec(RestWriter): - def makerest(self, config): module = config.pluginmanager.hook._hookspecs source = py.code.Source(module) @@ -212,7 +213,7 @@ class PluginDoc(RestWriter): # "py/test/plugin/%s" %(hg_changeset, basename))) self.links.append((basename, "http://bitbucket.org/hpk42/py-trunk/raw/%s/" - "py/test/plugin/%s" %(pyversion, basename))) + "_py/test/plugin/%s" %(pyversion, basename))) self.links.append(('customize', '../customize.html')) self.links.append(('plugins', 'index.html')) self.links.append(('get in contact', '../../contact.html')) --- a/_py/test/outcome.py +++ b/_py/test/outcome.py @@ -94,6 +94,25 @@ def raises(ExpectedException, *args, **k raise ExceptionFailure(msg="DID NOT RAISE", expr=args, expected=ExpectedException) +def importorskip(modname, minversion=None): + """ return imported module or perform a dynamic skip() """ + compile(modname, '', 'eval') # to catch syntaxerrors + try: + mod = __import__(modname, None, None, ['__doc__']) + except ImportError: + py.test.skip("could not import %r" %(modname,)) + if minversion is None: + return mod + verattr = getattr(mod, '__version__', None) + if isinstance(minversion, str): + minver = minversion.split(".") + else: + minver = list(minversion) + if verattr is None or verattr.split(".") < minver: + py.test.skip("module %r has __version__ %r, required is: %r" %( + modname, verattr, minversion)) + return mod + # exitcodes for the command line EXIT_OK = 0 --- a/testing/code/test_source.py +++ b/testing/code/test_source.py @@ -191,9 +191,8 @@ class TestSourceParsingAndCompiling: assert len(source) == 9 assert source.getstatementrange(5) == (0, 9) + @py.test.mark.skipif("sys.version_info < (2,6)") def test_compile_to_ast(self): - if sys.version_info < (2, 6): - py.test.skip("requires Python 2.6") import ast 
source = Source("x = 4") mod = source.compile(flag=ast.PyCF_ONLY_AST) @@ -257,7 +256,6 @@ def test_getstartingblock_multiline(): assert len(l) == 4 def test_getline_finally(): - #py.test.skip("inner statements cannot be located yet.") def c(): pass excinfo = py.test.raises(TypeError, """ teardown = None From hpk at codespeak.net Fri Oct 16 19:53:53 2009 From: hpk at codespeak.net (hpk at codespeak.net) Date: Fri, 16 Oct 2009 19:53:53 +0200 (CEST) Subject: [py-svn] r68543 - py/extradoc/talk/pycon-us-2009/pytest-introduction Message-ID: <20091016175353.9E3FE168069@codespeak.net> Author: hpk Date: Fri Oct 16 19:53:53 2009 New Revision: 68543 Modified: py/extradoc/talk/pycon-us-2009/pytest-introduction/pytest-introduction.html (props changed) Log: setting mime type From hpk at codespeak.net Fri Oct 16 21:38:28 2009 From: hpk at codespeak.net (hpk at codespeak.net) Date: Fri, 16 Oct 2009 21:38:28 +0200 (CEST) Subject: [py-svn] r68544 - in py/extradoc/talk/pycon-us-2010: . tutorial Message-ID: <20091016193828.830CC168035@codespeak.net> Author: hpk Date: Fri Oct 16 21:38:25 2009 New Revision: 68544 Added: py/extradoc/talk/pycon-us-2010/ py/extradoc/talk/pycon-us-2010/proposal-execnet.txt py/extradoc/talk/pycon-us-2010/proposal-pytest.txt py/extradoc/talk/pycon-us-2010/tutorial/ py/extradoc/talk/pycon-us-2010/tutorial/proposal-execnet.txt py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-advanced.txt - copied, changed from r66010, py/extradoc/talk/pycon-us-2009/proposal-pytest-advanced.txt py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-begin.txt - copied, changed from r66010, py/extradoc/talk/pycon-us-2009/proposal-pytest-begin.txt Log: adding a few notes and drafts of my various pycon us 2010 proposals Added: py/extradoc/talk/pycon-us-2010/proposal-execnet.txt ============================================================================== --- (empty file) +++ py/extradoc/talk/pycon-us-2010/proposal-execnet.txt Fri Oct 16 21:38:25 2009 @@ -0,0 +1,28 @@ +The Ring of Python - on competition and collaboration +------------------------------------------------------ + +CPython 2.5/2.6/3.1, Jython, IronPython, PyPy, +StacklessPython, UnladdenSwallow, Cython ... what do we +make of all these cool interpreter projects and versions? +Where does competition help and where does it hamper? +What about the standard library, packaging and running +applications on multiple interpreters? +I am going to present my own insights and ideas and foster +real-time audience discussion through audio and chat. + +execnet: connecting Python interpreters +------------------------------------------------- + +Want to step-by-step cross the Python3 version chasm? Want to +use some Java libraries from your otherwise CPython application? +Want to manage multiple nodes in a cloud in custom ways? +Then the new and improved 'execnet' package may be for you as it helps you to +connect all kinds of Python interpreters. I'll introduce its simple +programming model and show example for connecting CPython, Jython +and PyPy to each other. execnet only requires installation at the +initiating Python interpreter and dynamically installs itself into +other local or remote interpreters. This zero-installation property +makes it easy to for example have a Python3 library use some parts +installed in a Python2 environment. I'll discuss some real-life +experiences, methods and tools for developing protocols and +interactions with execnet. 
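Editor's sketch: the execnet proposal above describes connecting interpreters only in prose; a minimal sketch of the gateway/channel round trip it refers to (assuming execnet's ``makegateway``/``remote_exec`` API; the code pushed to the remote side is just an example)::

    import execnet

    # start a local subprocess interpreter (a "popen" gateway is the default)
    gw = execnet.makegateway()
    channel = gw.remote_exec("""
        # this source runs in the remote interpreter, where execnet
        # bootstraps itself; 'channel' is predefined on that side
        import sys
        channel.send("remote python %s.%s" % sys.version_info[:2])
    """)
    print(channel.receive())   # prints e.g. "remote python 2.6"
    gw.exit()

Because the remote side is bootstrapped over the connection, this is the zero-installation property the proposal mentions: only the initiating interpreter needs execnet installed.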
Added: py/extradoc/talk/pycon-us-2010/proposal-pytest.txt ============================================================================== --- (empty file) +++ py/extradoc/talk/pycon-us-2010/proposal-pytest.txt Fri Oct 16 21:38:25 2009 @@ -0,0 +1,27 @@ +rapid multi-purpose testing +-------------------------------------------------------- + +Want to test code, javascript or documents on different +Python Interpreters and versions? This talk gives an overview +on new py.test features of the last year and how you can use +py.test for a growing number of needs, for example: + +* run tests written for nose or unittest +* ad-hoc distribute tests to local or remote Python environments +* test compatibility with Python3 +* run javascript unit-tests in real browsers + +I'll conclude with upcoming features regarding testing-in-the-cloud +and my ideas on test tool convergence. + +biography +------------ + +Holger Krekel is a co-founder of the PyPy project, +and the initiator and maintainer of py.test, vadm +and a few other open source tools. In 2004 he founded +merlinux, a european Python R&D company consulting +on Python matters. + +Contact: holger at merlinux.eu, hpk42 on twitter + Added: py/extradoc/talk/pycon-us-2010/tutorial/proposal-execnet.txt ============================================================================== --- (empty file) +++ py/extradoc/talk/pycon-us-2010/tutorial/proposal-execnet.txt Fri Oct 16 21:38:25 2009 @@ -0,0 +1,100 @@ +Title: Writing hybrid apps with C/J/PyPy Python interpreters +Presenter: Holger Krekel +Tutorial format: interactive lecture +Recording: I give permission to record and publish my PyCon tutorial for free distribution. +Intended Audience: advanced Python programmers +Maximum number of students: maybe 30 +Perequisites/knowledge: having worked with two different Python versions or interpreters +Requirements: Laptop with as many different Python interpreters as you can install on it, +VirtualBox, Parallels or other VM tech is a plus. + +Presenter bio: + +Holger Krekel is a co-founder of the PyPy Python Interpreter project. +He is the initiator and maintainer of the popular py.test and other +tools. In 2004 he founded merlinux, a company focusing on tools +and practises for Python environments. Holger has spoken at many +EuroPython and PyCon confereces and gave well received testing +tutorials at the last Pycon-US 2009 and several european +conferences. + +Tutorial Summary: + +Want to use Python3 in a Python2 environment? Jython together +with CPython? Or execute some user code in a PyPy sandbox for +maximum security? In this highly interactive tutorial you'll learn +about execnet_, a lean package to write and deploy hybrid applications +in Python. We'll discuss basic ad-hoc deployment, elastic remote execution +concepts and do basic exercises for writing cross-interpreter applications. +We'll end with discussing learnings and potential future directions. + +.. 
_execnet: http://codespeak.net/execnet + +Overview Python Interpreters (10 minutes) +---------------------------------------------- + +- CPython 2.4, 2.5, 2.6: the mainstream +- CPython 3.1: the future +- Jython 2.5.1: the Java world +- IronPython 2.6: the .NET world + +execnet basics (10 minutes) +------------------------------- + +- cross connect Interpreters and platforms +- zero-install deployment +- local communication protocols + +installation (20 minutes) +------------------------------ + +- exercise: install execnet and write a simple + one sub interpreter example + +exchanging data (30 minutes) +------------------------------- + +interactive examples: + +- working with channel send/receive +- working with callbacks +- working with queues and multiple interpreters + +freestyle exercises and questions + +python2/python3 interactions (20 minutes) +------------------------------------------------ + +- string / byte differences +- syntax differences +- stream encoding + +exercise: run code in python2 from python3 and vice versa + +------------------------------------------------ +Break +------------------------------------------------ + +Using Jython <-> CPython (15 minutes) +------------------------------------------------ + +interactive examples: +- accessing Java libraries from CPython 2 or 3 +- accessing CPython extensions from Jython + +Writing and testing local protocols (30 minutes) +------------------------------------------------ + +- the command pattern +- testing each side of communication + +exercise: write a remote file server + +Interacting with PyPy sandboxing (30 minutes) +----------------------------------------------------- + +interactive walk through: +- Basics of PyPy sandboxing +- running a PyPy sandbox +- setting memory and CPU limits +- using remote_exec into a PyPy sandbox Copied: py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-advanced.txt (from r66010, py/extradoc/talk/pycon-us-2009/proposal-pytest-advanced.txt) ============================================================================== --- py/extradoc/talk/pycon-us-2009/proposal-pytest-advanced.txt (original) +++ py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-advanced.txt Fri Oct 16 21:38:25 2009 @@ -1,73 +1,121 @@ -Title: py.test - cross-platform and distributed testing -Presenter: Holger Krekel , Brian +Title: py.test II - cross-platform and distributed testing +Presenter: Holger Krekel Tutorial format: interactive lecture Recording: I give permission to record and publish my PyCon tutorial for free distribution. Intended Audience: Python programmers -Maximum number of students: maybe 30 +Maximum number of students: 30 Perequisites/knowledge: good knowledge of python programming, basic familiarity with automated testing -Requirements: Attendees are welcome to bring their laptops with Python installed (version 2.4 or higher). -Notes to reviewer: visting the beginner-oriented tutorial "rapid testing with minimal effort" is recommended, but not required. +Requirements: Laptops with Python and py.test installed +Notes to reviewer: visting Part I, the beginner-oriented tutorial "rapid testing with minimal effort" is recommended, not required. This version of this advanced tutorial will be an improved version +of what was given at Pycon 2009, see http://www.scribd.com/doc/21183340/Pycon2009-Pytest-Advanced Tutorial Summary: Want to know more about advanced automated testing with Python? 
-Use a tool that allows you to ad-hoc distribute tests to multiple -CPUs for speed and to multiple platforms for compatibility checks? -With tons of debugging help in failure situations? - This tutorial provides in-depth information on advanced usages -of the popular py.test tool. We highlight its current feature set -including using and writing extensions for generating HTML pages, -testing Javascript or ReST documents. We showcase and discuss ways -of distributing tests across CPUs and platforms and will leave -time to discuss and tackle specific scenarios brought up -during the session. - -The tutorial format will be an interactive lecture with plenty -of time for questions. +of the popular py.test tool. We show best practises for +managing test function arguments and generating multiple test calls +from specifications. We'll introduce writing of project-specific +extensions and showcase existing one for integration +with Continous Integration systems. We discuss ways of +distributing tests across CPUs and platforms, dive deeper into +managing test function arguments and will leave time +to discuss and tackle specific scenarios brought up during the session. +We conclude with a brief look on other testing tools, running traditional +unittests and doctests and do a Q&A session on other plugins. Outline for review: -Terminology/Overview (20 minutes) -- developer and "customer" tests -- small/medium/big tests aka unit/functional/integration -- acceptance tests -- benefits of automated testing -- existing python testing tools - -Walkthrough py.test basic features (30 minutes) -- example of test module -- working with failures, tracebacks -- generative tests -- skipping chunks within doctests -- looponfailing: run large test set until all tests pass +Intro and Terminology (20 mins) +---------------------------------------- + +- small/medium/big tests aka unit/integration/functional +- developer and customer tests +- py.test overview and basic usage + +Extension basics (10 mins) +---------------------------------------- + +interactive lecture: +- what are plugins +- how to do conftest.py files +- what are hooks + +Python test functions (10 mins) +------------------------------------ + +interactive lecture: +- test functions and classes +- test function arguments and factory lookup +- example: monkeypatch funcarg + +Generating tests (30 mins) +------------------------------------ + +interactive lecture: +- the pytest_generate_tests hook +- parametrization schemes +- py.test's mark mechanism + +exercise: write test functions and invoke +them with different values, specified via +the 'mark' decorator. + +funcarg request objects (30 mins) +------------------------------------ + +- caching complex values +- setup/teardown mechanism +- skipping tests from factories + +exercise: extend previous code to use +caching, write test functions and invoke +them with different values, specified via +the 'mark' decorator. 
+ + +------------------------------------ +break +------------------------------------ + +distributed testing (20 mins) +--------------------------------------- + +- distribution model +- load-balance testing +- multi-platform/interpreter testing + +advanced testing modes (20 mins) +--------------------------------------- + +- continously looping on failing tests +- protecting against crashing interpreters +- integrating nose/unittests style tests + +exercise: use the modes/styles with your code + +using funcargs (20 mins) +----------------------------------------------- + +- monkeypatch funcarg +- tempdir for creating per-test filesystem setup +- recwarn for assertions about warnings + +exercise: use the funcargs with your code + +using extensions (20 mins) +----------------------------------------------- -Using extensions (40 minutes) - integrate collection/run of traditional unit-tests -- run functions in their own tempdir -- testing ReST documents +- testing ReST syntax - running Javascript tests -- running Prolog tests -- html reporting for nightly runs +- producing xml/log files for buildbod/CruiseControl + +exercise: install/use the extensions -Break +feedback / Q&A (10 mins) +----------------------------------------------- -Writing extensions (30 minutes) -- overview on extensibility -- per-project hooks for customization -- conftest.py mechanism -- global hooks for plugins -- event system for custom reporting -- test collection hooks -- test running hooks -- writing cross-project plugins - -Distributed testing (45 minutes) -- motivation/vision -- run your tests on multiple CPUs/multicore -- running tests on multiple machines at once -- running tests on windows, driven by Unix -- do's and dont's for cross-process testing +feedback round and general Q&A -Buffer, Q&A (15 minutes) Copied: py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-begin.txt (from r66010, py/extradoc/talk/pycon-us-2009/proposal-pytest-begin.txt) ============================================================================== --- py/extradoc/talk/pycon-us-2009/proposal-pytest-begin.txt (original) +++ py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-begin.txt Fri Oct 16 21:38:25 2009 @@ -1,120 +1,139 @@ -Title: py.test - rapid testing with minimal effort -Presenter: Brian Dorsey , Holger Krekel +Title: py.test I - rapid testing with minimal effort +Presenter: Holger Krekel Tutorial format: interactive lecture -Recording: We give permission to record and publish my PyCon tutorial for free distribution. +Recording: I give permission to record and publish my PyCon tutorial for free distribution. Intended Audience: beginner programmers Maximum number of students: maybe 30 Perequisites/knowledge: basic knowledge of python programming -Requirements: Laptop with Python 2.4 or greater installed. Or pair with someone. :) +Requirements: Laptop with Python 2.4 or greater installed. Pairing welcome. + Presenter bio: -Brian Dorsey is a database and Python developer living in Seattle Washington, -USA. He mostly writes command line tools, windows services and more recntly -simple web apps. He is a long-time user of and occasional contributor to -py.test. He is a co-organizer of the Seattle Python Interest Group -(www.seapig.org), a member of www.saturdayhouse.org and co-founder of a -co-working space in Seattle (www.giraffelabs.com). He also loves lunch and -created www.noonhat.com to help feed that addiction. He doesn't like natto or -talking about himself in the third person. 
- -Holger Krekel is a co-founder of the PyPy project and -participates on many levels. He is the initiator and -maintainer of the popular py.test and a few other tools. In -2004 he founded merlinux, a company focusing on PyPy and py.test -developments. Holger has spoken at many EuroPython and -PyCon confereces and gave well received testing -tutorials at EuroPython 2008 and Pycon-UK 2008. +Holger Krekel is a co-founder of the PyPy Python Interpreter project. +He is the initiator and maintainer of the popular py.test and other +tools. In 2004 he founded merlinux, a company focusing on tools +and practises for Python environments. Holger has spoken at many +EuroPython and PyCon confereces and gave well received testing +tutorials at the last Pycon-US 2009 and several european +conferences. Tutorial Summary: -XXX: this needs to be shorter: max 100 words. +Want to learn writing tests in Python? This tutorial introduces +basic practises using the popular py.test (http://pytest.org) tool. +We start with an overview on testing and tools in Python and then discuss +and exercise basic idioms in a step-by-step manner. We cover the basic +writing, organisation and running of tests and debugging options. +In the second part we discuss setup and teardown of test state, usage +and configuration of test functions arguments and introduce coverage testing. +We will close with time to answer questions and discuss topics from the students. + + +Motivation and testing terms (15 minutes) +------------------------------------------------------- + +- why automated testing +- testing types +- basic test organisation -This tutorial introduces automated testing for Python using py.test -(http://codespeak.net/py/dist/test.html). We begin with a very short overview -of testing in Python and where unit testing py.test fit in. The rest of the -tutorial is hands on: a short introduction of something new followed by -exercises to experiment with it. First is basic usage of py.test and the -features which simplify your test writing experience. We walk through the -implementation of tests, setup and teardown of test state, debugging -facilities and point out similarities/differences to other test tools. We talk -about command line options which save time running and debugging your tests. -We then drop briefly into lecture mode and discuss additional features useful -in particular circumstances, such as running Javascript tests, testing the -format of text documents, or using py.test in a build system. We will close -with time to answer questions and discuss topics from the students. - -Outline for review: - -Motivation / why automated testing? (10 minutes) -- what is unit testing and how does it compare to other types of testing -- why do automated testing? benefits, etc -- existing python testing tools - -What you get with py.test (10 minutes) -- overview of all the basic benefits - automatic test discovery, simplicity, 'print' debugging (output redirection), function or class, assert introspection, etc -- extra benefits, there if you need them, ignore if not - multiple python version, distributed testing, doctests, etc, etc -- similarities and differences between nose and py.test +Installation. (15 minutes) +--------------------------------------------------------- -Installation, basic test functions. (30 minutes) +exercise: - installation -- test functions -- 20 minute work time - - Basic setup and working through inevitable setup problems. Some people will finish very quickly - ask them to help others get setup. 
+- basic test discovery +- writing and running simple test functions + +Basic usage of py.test (15 minutes) +------------------------------------------ -Basic usage of py.test (40 minutes) +interactive lecture: - reinterpretation of asserts -- working with failures - debug with print -- exceptions -- 10 minute work time - exercises -- test classes -- setup and teardown test state -- skipping tests -- 10 minute work time - exercises +- asserting exceptions +- debugging options, pdb + +test function arguments (15) +-------------------------------- + +interactive lecture: +- creating values for test functions +- understanding setup and teardown +- using helpers of the funcarg request object -Break -Options (25 minutes) -- --exitfirst -- --looponfailing - run large test set until all tests pass -- -k to run tests matching name or keyword -- --exec to use different Python interpreters -- different options to control traceback generation -- 10 minute work time - exercises +application scenario setup (30) +-------------------------------- + +exercise: implement the testing "mysetup" pattern +- separate test setup and test code -Branching out (20 minutes) -- generative tests -- skipping tests -- --pdb -- 10 minute work time - exercises - -Using doctests (25 minutes) -- what are doctests -- two usage scenarios - docstrings & stand alone files -- how they work demo and examples. -- 10 minute work time - exercises - -Wrapping up and questions (20 minutes) -- where to go from here -- quesitons and student topics +-------------------------------- +Break +-------------------------------- + +marking tests (15) +---------------------------------- + +interactive lecture: +- marking tests for skip and xfail +- marking classes and modules +- selectively running tests + +hooks and plugins (15) +-------------------------------- + +interactive lecture: +- conftest plugins +- plugin discovery +- pytest hook functions + +new command line option (15) +---------------------------------- + +exercise: +- add a new cmdline option for the "mysetup" setup object +- skip test depending on a cmdline option + +mocking and monkeypatching (15) +---------------------------------- + +interactive lecture: +- when you need monkeypatching +- the monkeypatch funcarg +exercise: test code involving os.getpwd() handling + +coverage testing (15) +---------------------------------- + +- figleaf and coverage tools +- installing figleaf or coverage +- exercise: get html coverage for your code + +overview on advanced usages (15) +--------------------------------------- + +interactive lecture: +- looponfailing +- distributed testing (part II) +- generative tests (part II) +- doctests (part II) +- javascript testing Notes for reviewers: -We're happy to adjust the content of the talk to match attendees interests and would greatly value reviewer input into specific areas they expect people to be interested (or not) in. +This version of the tutorial will be an improved version +of what was given at Pycon 2009, see also: + +http://tinyurl.com/yjluwwy +http://www.scribd.com/doc/21183340/Pycon2009-Pytest-Advanced Previous speaking/teaching experience: -Brian: -I co-presented a talk at PyCon last year and gave an earlier version of this -talk at the Vancouver Python Workshop. Most months, I moderate our loose -disscussion oriented meetings for the Seattle Python Interest Group. I'm -comfortable in front of audiences speaking and teaching. I taught High School -English in Japan for two years. 
For an example of my speaking style in short -talks, see: http://www.youtube.com/watch?v=2f4c1dQW3vY and a regular talk format from PyCon last year: http://www.youtube.com/watch?v=OCZ19R0KD4o This tutorial would be a different speaking style Holger: I have talked at numerous Pycon and EuroPython conferences about -PyPyand testing tools. In 2008 i gave a 1h non-hands on version -of the proposed tutorial which was received very well. I've +PyPy and testing tools. In 2009 i started giving well received +testing tutorials at Pycon US, EuroPython and Pycon UK. I've also co-ordinated many sprints, gave teaching lectures at university about computer science and generally enjoy interacting with an interested learning group. From cfbolz at codespeak.net Fri Oct 16 23:25:11 2009 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Fri, 16 Oct 2009 23:25:11 +0200 (CEST) Subject: [py-svn] r68548 - py/extradoc/talk/pycon-us-2010/tutorial Message-ID: <20091016212511.5CD7A168067@codespeak.net> Author: cfbolz Date: Fri Oct 16 23:25:10 2009 New Revision: 68548 Modified: py/extradoc/talk/pycon-us-2010/tutorial/proposal-execnet.txt Log: a typo. of course PyPy is the future too :-) Modified: py/extradoc/talk/pycon-us-2010/tutorial/proposal-execnet.txt ============================================================================== --- py/extradoc/talk/pycon-us-2010/tutorial/proposal-execnet.txt (original) +++ py/extradoc/talk/pycon-us-2010/tutorial/proposal-execnet.txt Fri Oct 16 23:25:10 2009 @@ -15,7 +15,7 @@ tools. In 2004 he founded merlinux, a company focusing on tools and practises for Python environments. Holger has spoken at many EuroPython and PyCon confereces and gave well received testing -tutorials at the last Pycon-US 2009 and several european +tutorials at the last Pycon-US 2009 and several European conferences. Tutorial Summary: @@ -34,7 +34,7 @@ ---------------------------------------------- - CPython 2.4, 2.5, 2.6: the mainstream -- CPython 3.1: the future +- CPython 3.1, PyPy: the future - Jython 2.5.1: the Java world - IronPython 2.6: the .NET world From cfbolz at codespeak.net Fri Oct 16 23:28:36 2009 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Fri, 16 Oct 2009 23:28:36 +0200 (CEST) Subject: [py-svn] r68549 - py/extradoc/talk/pycon-us-2010/tutorial Message-ID: <20091016212836.879F1168067@codespeak.net> Author: cfbolz Date: Fri Oct 16 23:28:35 2009 New Revision: 68549 Modified: py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-advanced.txt py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-begin.txt Log: few small things Modified: py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-advanced.txt ============================================================================== --- py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-advanced.txt (original) +++ py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-advanced.txt Fri Oct 16 23:28:35 2009 @@ -88,7 +88,7 @@ advanced testing modes (20 mins) --------------------------------------- -- continously looping on failing tests +- continuously looping on failing tests - protecting against crashing interpreters - integrating nose/unittests style tests Modified: py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-begin.txt ============================================================================== --- py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-begin.txt (original) +++ py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-begin.txt Fri Oct 16 23:28:35 2009 @@ -15,7 +15,7 @@ tools. 
In 2004 he founded merlinux, a company focusing on tools and practises for Python environments. Holger has spoken at many EuroPython and PyCon confereces and gave well received testing -tutorials at the last Pycon-US 2009 and several european +tutorials at the last Pycon-US 2009 and several European conferences. Tutorial Summary: @@ -27,7 +27,7 @@ writing, organisation and running of tests and debugging options. In the second part we discuss setup and teardown of test state, usage and configuration of test functions arguments and introduce coverage testing. -We will close with time to answer questions and discuss topics from the students. +We will close with time to answer questions and discuss topics from the participants. Motivation and testing terms (15 minutes) @@ -99,7 +99,7 @@ ---------------------------------- interactive lecture: -- when you need monkeypatching +- when do you need monkeypatching? - the monkeypatch funcarg exercise: test code involving os.getpwd() handling From hpk at codespeak.net Sat Oct 17 10:38:02 2009 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sat, 17 Oct 2009 10:38:02 +0200 (CEST) Subject: [py-svn] r68572 - py/extradoc/talk/pycon-us-2010/tutorial Message-ID: <20091017083802.6451A16800D@codespeak.net> Author: hpk Date: Sat Oct 17 10:38:01 2009 New Revision: 68572 Added: py/extradoc/talk/pycon-us-2010/tutorial/hybrid-python.txt - copied, changed from r68568, py/extradoc/talk/pycon-us-2010/tutorial/proposal-execnet.txt py/extradoc/talk/pycon-us-2010/tutorial/pytest-I-rapidtesting.txt - copied, changed from r68568, py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-begin.txt py/extradoc/talk/pycon-us-2010/tutorial/pytest-II-advanced.txt - copied, changed from r68568, py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-advanced.txt Removed: py/extradoc/talk/pycon-us-2010/tutorial/proposal-execnet.txt py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-advanced.txt py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-begin.txt Log: finalizing tutorials Copied: py/extradoc/talk/pycon-us-2010/tutorial/hybrid-python.txt (from r68568, py/extradoc/talk/pycon-us-2010/tutorial/proposal-execnet.txt) ============================================================================== --- py/extradoc/talk/pycon-us-2010/tutorial/proposal-execnet.txt (original) +++ py/extradoc/talk/pycon-us-2010/tutorial/hybrid-python.txt Sat Oct 17 10:38:01 2009 @@ -12,13 +12,16 @@ Holger Krekel is a co-founder of the PyPy Python Interpreter project. He is the initiator and maintainer of the popular py.test and other -tools. In 2004 he founded merlinux, a company focusing on tools -and practises for Python environments. Holger has spoken at many -EuroPython and PyCon confereces and gave well received testing -tutorials at the last Pycon-US 2009 and several European -conferences. - -Tutorial Summary: +tools. In 2004 he founded merlinux, a company focusing on productive +Python environments environments. Holger has spoken at many +EuroPython and PyCon conferences and gave well received testing +tutorials at Pycon 2009 and several European conferences. He has +also co-ordinated many sprints, teached CS courses and +and enjoys interacting with an interested learning group. + +=============================== +Tutorial Summary +=============================== Want to use Python3 in a Python2 environment? Jython together with CPython? Or execute some user code in a PyPy sandbox for @@ -30,26 +33,37 @@ .. 
_execnet: http://codespeak.net/execnet +=============================== +outline for review +=============================== +this is a new tutorial, also relates to my invited talk here: +http://us.pycon.org/2010/conference/keynotes/ + + Overview Python Interpreters (10 minutes) ---------------------------------------------- - CPython 2.4, 2.5, 2.6: the mainstream -- CPython 3.1, PyPy: the future - Jython 2.5.1: the Java world - IronPython 2.6: the .NET world +- CPython 3.1, PyPy: the future execnet basics (10 minutes) ------------------------------- -- cross connect Interpreters and platforms -- zero-install deployment -- local communication protocols +- instantiating gateways between Interpreters +- remote execution of code +- exchanging structured data installation (20 minutes) ------------------------------ -- exercise: install execnet and write a simple - one sub interpreter example +- zero-install principles +- self-bootstrapping + +exercise: +install execnet and write a simple +one sub interpreter example exchanging data (30 minutes) ------------------------------- @@ -60,7 +74,7 @@ - working with callbacks - working with queues and multiple interpreters -freestyle exercises and questions +exercise: play with examples for each mode python2/python3 interactions (20 minutes) ------------------------------------------------ @@ -75,11 +89,12 @@ Break ------------------------------------------------ -Using Jython <-> CPython (15 minutes) +Using Jython <-> CPython (20 minutes) ------------------------------------------------ interactive examples: - accessing Java libraries from CPython 2 or 3 +- accessing C# classes from CPython 2 or 3 - accessing CPython extensions from Jython Writing and testing local protocols (30 minutes) @@ -90,11 +105,18 @@ exercise: write a remote file server -Interacting with PyPy sandboxing (30 minutes) +Interacting with PyPy sandboxing (20 minutes) ----------------------------------------------------- -interactive walk through: +joint interactive walk through: - Basics of PyPy sandboxing - running a PyPy sandbox - setting memory and CPU limits - using remote_exec into a PyPy sandbox + +conclusion (10 minutes) +----------------------------------------------------- + +- summary +- outlook +- feedback session Deleted: /py/extradoc/talk/pycon-us-2010/tutorial/proposal-execnet.txt ============================================================================== --- /py/extradoc/talk/pycon-us-2010/tutorial/proposal-execnet.txt Sat Oct 17 10:38:01 2009 +++ (empty file) @@ -1,100 +0,0 @@ -Title: Writing hybrid apps with C/J/PyPy Python interpreters -Presenter: Holger Krekel -Tutorial format: interactive lecture -Recording: I give permission to record and publish my PyCon tutorial for free distribution. -Intended Audience: advanced Python programmers -Maximum number of students: maybe 30 -Perequisites/knowledge: having worked with two different Python versions or interpreters -Requirements: Laptop with as many different Python interpreters as you can install on it, -VirtualBox, Parallels or other VM tech is a plus. - -Presenter bio: - -Holger Krekel is a co-founder of the PyPy Python Interpreter project. -He is the initiator and maintainer of the popular py.test and other -tools. In 2004 he founded merlinux, a company focusing on tools -and practises for Python environments. Holger has spoken at many -EuroPython and PyCon confereces and gave well received testing -tutorials at the last Pycon-US 2009 and several European -conferences. 
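
(Illustrative sketch of the gateway/channel basics listed in the execnet outline above -- instantiating a gateway, remote code execution and structured send/receive; it assumes the standalone execnet package with a plain local popen gateway.)

import execnet

gw = execnet.makegateway()        # spawn a local sub-interpreter via popen
channel = gw.remote_exec("""
# this source executes in the remote interpreter; 'channel' is predefined there
n = channel.receive()
channel.send(n * 2)
""")
channel.send(21)
assert channel.receive() == 42
gw.exit()
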
- -Tutorial Summary: - -Want to use Python3 in a Python2 environment? Jython together -with CPython? Or execute some user code in a PyPy sandbox for -maximum security? In this highly interactive tutorial you'll learn -about execnet_, a lean package to write and deploy hybrid applications -in Python. We'll discuss basic ad-hoc deployment, elastic remote execution -concepts and do basic exercises for writing cross-interpreter applications. -We'll end with discussing learnings and potential future directions. - -.. _execnet: http://codespeak.net/execnet - -Overview Python Interpreters (10 minutes) ----------------------------------------------- - -- CPython 2.4, 2.5, 2.6: the mainstream -- CPython 3.1, PyPy: the future -- Jython 2.5.1: the Java world -- IronPython 2.6: the .NET world - -execnet basics (10 minutes) -------------------------------- - -- cross connect Interpreters and platforms -- zero-install deployment -- local communication protocols - -installation (20 minutes) ------------------------------- - -- exercise: install execnet and write a simple - one sub interpreter example - -exchanging data (30 minutes) -------------------------------- - -interactive examples: - -- working with channel send/receive -- working with callbacks -- working with queues and multiple interpreters - -freestyle exercises and questions - -python2/python3 interactions (20 minutes) ------------------------------------------------- - -- string / byte differences -- syntax differences -- stream encoding - -exercise: run code in python2 from python3 and vice versa - ------------------------------------------------- -Break ------------------------------------------------- - -Using Jython <-> CPython (15 minutes) ------------------------------------------------- - -interactive examples: -- accessing Java libraries from CPython 2 or 3 -- accessing CPython extensions from Jython - -Writing and testing local protocols (30 minutes) ------------------------------------------------- - -- the command pattern -- testing each side of communication - -exercise: write a remote file server - -Interacting with PyPy sandboxing (30 minutes) ------------------------------------------------------ - -interactive walk through: -- Basics of PyPy sandboxing -- running a PyPy sandbox -- setting memory and CPU limits -- using remote_exec into a PyPy sandbox Deleted: /py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-advanced.txt ============================================================================== --- /py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-advanced.txt Sat Oct 17 10:38:01 2009 +++ (empty file) @@ -1,121 +0,0 @@ -Title: py.test II - cross-platform and distributed testing -Presenter: Holger Krekel -Tutorial format: interactive lecture -Recording: I give permission to record and publish my PyCon tutorial for free distribution. -Intended Audience: Python programmers -Maximum number of students: 30 -Perequisites/knowledge: good knowledge of python programming, basic familiarity with automated testing -Requirements: Laptops with Python and py.test installed -Notes to reviewer: visting Part I, the beginner-oriented tutorial "rapid testing with minimal effort" is recommended, not required. This version of this advanced tutorial will be an improved version -of what was given at Pycon 2009, see http://www.scribd.com/doc/21183340/Pycon2009-Pytest-Advanced - -Tutorial Summary: - -Want to know more about advanced automated testing with Python? 
-This tutorial provides in-depth information on advanced usages -of the popular py.test tool. We show best practises for -managing test function arguments and generating multiple test calls -from specifications. We'll introduce writing of project-specific -extensions and showcase existing one for integration -with Continous Integration systems. We discuss ways of -distributing tests across CPUs and platforms, dive deeper into -managing test function arguments and will leave time -to discuss and tackle specific scenarios brought up during the session. -We conclude with a brief look on other testing tools, running traditional -unittests and doctests and do a Q&A session on other plugins. - -Outline for review: - -Intro and Terminology (20 mins) ----------------------------------------- - -- small/medium/big tests aka unit/integration/functional -- developer and customer tests -- py.test overview and basic usage - -Extension basics (10 mins) ----------------------------------------- - -interactive lecture: -- what are plugins -- how to do conftest.py files -- what are hooks - -Python test functions (10 mins) ------------------------------------- - -interactive lecture: -- test functions and classes -- test function arguments and factory lookup -- example: monkeypatch funcarg - -Generating tests (30 mins) ------------------------------------- - -interactive lecture: -- the pytest_generate_tests hook -- parametrization schemes -- py.test's mark mechanism - -exercise: write test functions and invoke -them with different values, specified via -the 'mark' decorator. - -funcarg request objects (30 mins) ------------------------------------- - -- caching complex values -- setup/teardown mechanism -- skipping tests from factories - -exercise: extend previous code to use -caching, write test functions and invoke -them with different values, specified via -the 'mark' decorator. - - ------------------------------------- -break ------------------------------------- - -distributed testing (20 mins) ---------------------------------------- - -- distribution model -- load-balance testing -- multi-platform/interpreter testing - -advanced testing modes (20 mins) ---------------------------------------- - -- continuously looping on failing tests -- protecting against crashing interpreters -- integrating nose/unittests style tests - -exercise: use the modes/styles with your code - -using funcargs (20 mins) ------------------------------------------------ - -- monkeypatch funcarg -- tempdir for creating per-test filesystem setup -- recwarn for assertions about warnings - -exercise: use the funcargs with your code - -using extensions (20 mins) ------------------------------------------------ - -- integrate collection/run of traditional unit-tests -- testing ReST syntax -- running Javascript tests -- producing xml/log files for buildbod/CruiseControl - -exercise: install/use the extensions - -feedback / Q&A (10 mins) ------------------------------------------------ - -feedback round and general Q&A - - Deleted: /py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-begin.txt ============================================================================== --- /py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-begin.txt Sat Oct 17 10:38:01 2009 +++ (empty file) @@ -1,139 +0,0 @@ -Title: py.test I - rapid testing with minimal effort -Presenter: Holger Krekel -Tutorial format: interactive lecture -Recording: I give permission to record and publish my PyCon tutorial for free distribution. 
-Intended Audience: beginner programmers -Maximum number of students: maybe 30 -Perequisites/knowledge: basic knowledge of python programming -Requirements: Laptop with Python 2.4 or greater installed. Pairing welcome. - - -Presenter bio: - -Holger Krekel is a co-founder of the PyPy Python Interpreter project. -He is the initiator and maintainer of the popular py.test and other -tools. In 2004 he founded merlinux, a company focusing on tools -and practises for Python environments. Holger has spoken at many -EuroPython and PyCon confereces and gave well received testing -tutorials at the last Pycon-US 2009 and several European -conferences. - -Tutorial Summary: - -Want to learn writing tests in Python? This tutorial introduces -basic practises using the popular py.test (http://pytest.org) tool. -We start with an overview on testing and tools in Python and then discuss -and exercise basic idioms in a step-by-step manner. We cover the basic -writing, organisation and running of tests and debugging options. -In the second part we discuss setup and teardown of test state, usage -and configuration of test functions arguments and introduce coverage testing. -We will close with time to answer questions and discuss topics from the participants. - - -Motivation and testing terms (15 minutes) -------------------------------------------------------- - -- why automated testing -- testing types -- basic test organisation - -Installation. (15 minutes) ---------------------------------------------------------- - -exercise: -- installation -- basic test discovery -- writing and running simple test functions - -Basic usage of py.test (15 minutes) ------------------------------------------- - -interactive lecture: -- reinterpretation of asserts -- asserting exceptions -- debugging options, pdb - -test function arguments (15) --------------------------------- - -interactive lecture: -- creating values for test functions -- understanding setup and teardown -- using helpers of the funcarg request object - - -application scenario setup (30) --------------------------------- - -exercise: implement the testing "mysetup" pattern -- separate test setup and test code - --------------------------------- -Break --------------------------------- - -marking tests (15) ----------------------------------- - -interactive lecture: -- marking tests for skip and xfail -- marking classes and modules -- selectively running tests - -hooks and plugins (15) --------------------------------- - -interactive lecture: -- conftest plugins -- plugin discovery -- pytest hook functions - -new command line option (15) ----------------------------------- - -exercise: -- add a new cmdline option for the "mysetup" setup object -- skip test depending on a cmdline option - -mocking and monkeypatching (15) ----------------------------------- - -interactive lecture: -- when do you need monkeypatching? 
-- the monkeypatch funcarg -exercise: test code involving os.getpwd() handling - -coverage testing (15) ----------------------------------- - -- figleaf and coverage tools -- installing figleaf or coverage -- exercise: get html coverage for your code - -overview on advanced usages (15) ---------------------------------------- - -interactive lecture: -- looponfailing -- distributed testing (part II) -- generative tests (part II) -- doctests (part II) -- javascript testing - - -Notes for reviewers: -This version of the tutorial will be an improved version -of what was given at Pycon 2009, see also: - -http://tinyurl.com/yjluwwy -http://www.scribd.com/doc/21183340/Pycon2009-Pytest-Advanced - -Previous speaking/teaching experience: - -Holger: -I have talked at numerous Pycon and EuroPython conferences about -PyPy and testing tools. In 2009 i started giving well received -testing tutorials at Pycon US, EuroPython and Pycon UK. I've -also co-ordinated many sprints, gave teaching lectures -at university about computer science and generally enjoy -interacting with an interested learning group. Copied: py/extradoc/talk/pycon-us-2010/tutorial/pytest-I-rapidtesting.txt (from r68568, py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-begin.txt) ============================================================================== --- py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-begin.txt (original) +++ py/extradoc/talk/pycon-us-2010/tutorial/pytest-I-rapidtesting.txt Sat Oct 17 10:38:01 2009 @@ -1,4 +1,6 @@ -Title: py.test I - rapid testing with minimal effort +py.test I - rapid testing with minimal effort +--------------------------------------------------- + Presenter: Holger Krekel Tutorial format: interactive lecture Recording: I give permission to record and publish my PyCon tutorial for free distribution. @@ -12,13 +14,16 @@ Holger Krekel is a co-founder of the PyPy Python Interpreter project. He is the initiator and maintainer of the popular py.test and other -tools. In 2004 he founded merlinux, a company focusing on tools -and practises for Python environments. Holger has spoken at many -EuroPython and PyCon confereces and gave well received testing -tutorials at the last Pycon-US 2009 and several European -conferences. +tools. In 2004 he founded merlinux, a company focusing on productive +Python environments environments. Holger has spoken at many +EuroPython and PyCon conferences and gave well received testing +tutorials at Pycon 2009 and several European conferences. He has +also co-ordinated many sprints, teached CS courses and +and enjoys interacting with an interested learning group. +=============================== Tutorial Summary: +=============================== Want to learn writing tests in Python? This tutorial introduces basic practises using the popular py.test (http://pytest.org) tool. @@ -29,6 +34,22 @@ and configuration of test functions arguments and introduce coverage testing. We will close with time to answer questions and discuss topics from the participants. +=============================== +Notes for reviewers: +=============================== +This version of the tutorial will be an improved version +of what was given at Pycon 2009, see also: +http://tinyurl.com/yjluwwy +http://www.scribd.com/doc/21183340/Pycon2009-Pytest-Advanced + + +=============================== +outline for review: +=============================== + +where not noted otherwise I'll present the contents +in a highly interactive manner. 
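
(Illustrative sketch of the monkeypatch funcarg mentioned in the monkeypatching items above, here faking os.getcwd(); checkout_dir() is a made-up helper standing in for application code under test.)

import os

def checkout_dir():
    # made-up application helper that depends on the current working directory
    return os.getcwd()

def test_checkout_dir(monkeypatch):
    monkeypatch.setattr(os, 'getcwd', lambda: "/faked/cwd")
    assert checkout_dir() == "/faked/cwd"
    # the original os.getcwd is restored automatically when the test finishes
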
+ Motivation and testing terms (15 minutes) ------------------------------------------------------- @@ -65,8 +86,9 @@ application scenario setup (30) -------------------------------- -exercise: implement the testing "mysetup" pattern -- separate test setup and test code +exercise: separate your test setup and test code and +implement the "mysetup" testing pattern + -------------------------------- Break @@ -88,29 +110,21 @@ - plugin discovery - pytest hook functions -new command line option (15) +new command line option (20) ---------------------------------- exercise: - add a new cmdline option for the "mysetup" setup object - skip test depending on a cmdline option -mocking and monkeypatching (15) ----------------------------------- - -interactive lecture: -- when do you need monkeypatching? -- the monkeypatch funcarg -exercise: test code involving os.getpwd() handling - -coverage testing (15) +coverage testing (20) ---------------------------------- - figleaf and coverage tools - installing figleaf or coverage - exercise: get html coverage for your code -overview on advanced usages (15) +overview on advanced usages (10) --------------------------------------- interactive lecture: @@ -120,20 +134,8 @@ - doctests (part II) - javascript testing +conclusion (10 mins) +----------------------------------------------------- -Notes for reviewers: -This version of the tutorial will be an improved version -of what was given at Pycon 2009, see also: - -http://tinyurl.com/yjluwwy -http://www.scribd.com/doc/21183340/Pycon2009-Pytest-Advanced - -Previous speaking/teaching experience: - -Holger: -I have talked at numerous Pycon and EuroPython conferences about -PyPy and testing tools. In 2009 i started giving well received -testing tutorials at Pycon US, EuroPython and Pycon UK. I've -also co-ordinated many sprints, gave teaching lectures -at university about computer science and generally enjoy -interacting with an interested learning group. +- summary +- feedback session Copied: py/extradoc/talk/pycon-us-2010/tutorial/pytest-II-advanced.txt (from r68568, py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-advanced.txt) ============================================================================== --- py/extradoc/talk/pycon-us-2010/tutorial/proposal-pytest-advanced.txt (original) +++ py/extradoc/talk/pycon-us-2010/tutorial/pytest-II-advanced.txt Sat Oct 17 10:38:01 2009 @@ -1,4 +1,7 @@ -Title: py.test II - cross-platform and distributed testing + +Title: py.test II - advanced cross-platform and distributed testing +------------------------------------------------------------- + Presenter: Holger Krekel Tutorial format: interactive lecture Recording: I give permission to record and publish my PyCon tutorial for free distribution. @@ -6,10 +9,28 @@ Maximum number of students: 30 Perequisites/knowledge: good knowledge of python programming, basic familiarity with automated testing Requirements: Laptops with Python and py.test installed -Notes to reviewer: visting Part I, the beginner-oriented tutorial "rapid testing with minimal effort" is recommended, not required. This version of this advanced tutorial will be an improved version -of what was given at Pycon 2009, see http://www.scribd.com/doc/21183340/Pycon2009-Pytest-Advanced -Tutorial Summary: +Notes to reviewer + +visting Part I, the beginner-oriented tutorial "rapid testing with minimal effort", is recommended +if you haven't been exposed to testing practises in Python. 
The below outlined version of this +advanced tutorial will be an improved version of what I gave at Pycon 2009, see +http://www.scribd.com/doc/21183340/Pycon2009-Pytest-Advanced + +Presenter bio: + +Holger Krekel is a co-founder of the PyPy Python Interpreter project. +He is the initiator and maintainer of the popular py.test and other +tools. In 2004 he founded merlinux, a company focusing on productive +Python environments environments. Holger has spoken at many +EuroPython and PyCon conferences and gave well received testing +tutorials at Pycon 2009 and several European conferences. He has +also co-ordinated many sprints, teached CS courses and +and enjoys interacting with an interested learning group. + +=============================== +Tutorial Summary +=============================== Want to know more about advanced automated testing with Python? This tutorial provides in-depth information on advanced usages @@ -24,7 +45,9 @@ We conclude with a brief look on other testing tools, running traditional unittests and doctests and do a Q&A session on other plugins. -Outline for review: +=============================== +Outline for review +=============================== Intro and Terminology (20 mins) ---------------------------------------- @@ -49,7 +72,7 @@ - test function arguments and factory lookup - example: monkeypatch funcarg -Generating tests (30 mins) +Generating tests (20 mins) ------------------------------------ interactive lecture: @@ -61,7 +84,7 @@ them with different values, specified via the 'mark' decorator. -funcarg request objects (30 mins) +funcarg request objects (20 mins) ------------------------------------ - caching complex values @@ -73,6 +96,13 @@ them with different values, specified via the 'mark' decorator. +mocking and monkeypatching (10) +---------------------------------- + +interactive demo: +- when do you need monkeypatching? +- the monkeypatch funcarg + ------------------------------------ break From hpk at codespeak.net Sat Oct 17 10:52:26 2009 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sat, 17 Oct 2009 10:52:26 +0200 (CEST) Subject: [py-svn] r68573 - py/extradoc/talk/pycon-us-2010/tutorial Message-ID: <20091017085226.EE23116800D@codespeak.net> Author: hpk Date: Sat Oct 17 10:52:25 2009 New Revision: 68573 Modified: py/extradoc/talk/pycon-us-2010/tutorial/pytest-II-advanced.txt Log: fixed summary Modified: py/extradoc/talk/pycon-us-2010/tutorial/pytest-II-advanced.txt ============================================================================== --- py/extradoc/talk/pycon-us-2010/tutorial/pytest-II-advanced.txt (original) +++ py/extradoc/talk/pycon-us-2010/tutorial/pytest-II-advanced.txt Sat Oct 17 10:52:25 2009 @@ -1,5 +1,5 @@ -Title: py.test II - advanced cross-platform and distributed testing +Title: py.test II - advanced testing ------------------------------------------------------------- Presenter: Holger Krekel @@ -35,15 +35,14 @@ Want to know more about advanced automated testing with Python? This tutorial provides in-depth information on advanced usages of the popular py.test tool. We show best practises for -managing test function arguments and generating multiple test calls -from specifications. We'll introduce writing of project-specific -extensions and showcase existing one for integration -with Continous Integration systems. 
We discuss ways of -distributing tests across CPUs and platforms, dive deeper into -managing test function arguments and will leave time -to discuss and tackle specific scenarios brought up during the session. -We conclude with a brief look on other testing tools, running traditional -unittests and doctests and do a Q&A session on other plugins. +managing test function arguments and how to parametrize and +generate tests from specifications. We'll introduce writing of +project-specific extensions and walk through some interesting +helpers and extensions to grow your testing tool box. We exercise +ways of distributing tests across CPUs and platforms and +give an overview of 3rd party extensions. We conclude with a brief +look on running traditional unittests and doctests and a +feedback session. =============================== Outline for review From commits-noreply at bitbucket.org Sat Oct 17 17:48:39 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sat, 17 Oct 2009 15:48:39 +0000 (UTC) Subject: [py-svn] py-trunk commit a416dddd664e: reshuffle/refine option grouping, introduce "terminal reporting options" Message-ID: <20091017154839.55F4A7EEE1@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1255794239 -7200 # Node ID a416dddd664e20bb15fa5a03eb16887c1b4cefe0 # Parent a8c60fc9842288e4be3c75ca2a9ad31fdbe65288 reshuffle/refine option grouping, introduce "terminal reporting options" --- a/doc/changelog.txt +++ b/doc/changelog.txt @@ -1,6 +1,8 @@ Changes between 1.0.2 and '1.1.0b1' ===================================== +* reshuffle / refine command line grouping + * deprecate parser.addgroup in favour of getgroup which creates option group * add --report command line option that allows to control showing of skipped/xfailed sections --- a/_py/test/plugin/pytest_default.py +++ b/_py/test/plugin/pytest_default.py @@ -44,9 +44,7 @@ def pytest_report_iteminfo(item): return item.reportinfo() def pytest_addoption(parser): - group = parser.getgroup("general", "general testing options") - group._addoption('-v', '--verbose', action="count", - dest="verbose", default=0, help="increase verbosity."), + group = parser.getgroup("general", "running and selection options") group._addoption('-x', '--exitfirst', action="store_true", dest="exitfirst", default=False, help="exit instantly on first error or failed test."), @@ -56,16 +54,6 @@ def pytest_addoption(parser): "space separated keywords. precede a keyword with '-' to negate. " "Terminate the expression with ':' to treat a match as a signal " "to run all subsequent tests. ") - group._addoption('-l', '--showlocals', - action="store_true", dest="showlocals", default=False, - help="show locals in tracebacks (disabled by default).") - #group._addoption('--showskipsummary', - # action="store_true", dest="showskipsummary", default=False, - # help="always show summary of skipped tests") - group._addoption('--tb', metavar="style", - action="store", dest="tbstyle", default='long', - type="choice", choices=['long', 'short', 'no'], - help="traceback verboseness (long/short/no).") group._addoption('-p', action="append", dest="plugins", default = [], help=("load the specified plugin after command line parsing. 
")) if execnet: --- a/_py/test/plugin/pytest_capture.py +++ b/_py/test/plugin/pytest_capture.py @@ -89,11 +89,11 @@ import os def pytest_addoption(parser): group = parser.getgroup("general") - group._addoption('-s', action="store_const", const="no", dest="capture", - help="shortcut for --capture=no.") group._addoption('--capture', action="store", default=None, metavar="method", type="choice", choices=['fd', 'sys', 'no'], help="set capturing method during tests: fd (default)|sys|no.") + group._addoption('-s', action="store_const", const="no", dest="capture", + help="shortcut for --capture=no.") def addouterr(rep, outerr): repr = getattr(rep, 'longrepr', None) --- a/_py/test/plugin/pytest_terminal.py +++ b/_py/test/plugin/pytest_terminal.py @@ -7,6 +7,23 @@ import py import sys def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", after="general") + group._addoption('-v', '--verbose', action="count", + dest="verbose", default=0, help="increase verbosity."), + group._addoption('-l', '--showlocals', + action="store_true", dest="showlocals", default=False, + help="show locals in tracebacks (disabled by default).") + group.addoption('--report', + action="store", dest="report", default=None, metavar="opts", + help="comma separated reporting options") + group._addoption('--tb', metavar="style", + action="store", dest="tbstyle", default='long', + type="choice", choices=['long', 'short', 'no'], + help="traceback verboseness (long/short/no).") + group._addoption('--fulltrace', + action="store_true", dest="fulltrace", default=False, + help="don't cut any tracebacks (default is to cut).") + group = parser.getgroup("debugconfig") group.addoption('--collectonly', action="store_true", dest="collectonly", @@ -17,15 +34,9 @@ def pytest_addoption(parser): group._addoption('--nomagic', action="store_true", dest="nomagic", default=False, help="don't reinterpret asserts, no traceback cutting. 
") - group._addoption('--fulltrace', - action="store_true", dest="fulltrace", default=False, - help="don't cut any tracebacks (default is to cut).") group.addoption('--debug', action="store_true", dest="debug", default=False, - help="generate and show debugging information.") - group.addoption('--report', - action="store", dest="report", default=None, metavar="opts", - help="comma separated reporting options") + help="generate and show internal debugging information.") def pytest_configure(config): From commits-noreply at bitbucket.org Sat Oct 17 17:48:41 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sat, 17 Oct 2009 15:48:41 +0000 (UTC) Subject: [py-svn] py-trunk commit 647c31b19f6f: fix formatting of session log output Message-ID: <20091017154841.573787EF4F@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1255641291 -7200 # Node ID 647c31b19f6f38f577552aac66276915511af192 # Parent be400218cdccbab6c8dd57fc5dade0c2e77a56e1 fix formatting of session log output --- a/_py/test/plugin/pytest_pastebin.py +++ b/_py/test/plugin/pytest_pastebin.py @@ -53,7 +53,7 @@ def pytest_unconfigure(config): del config._pastebinfile proxyid = getproxy().newPaste("python", sessionlog) pastebinurl = "%s%s" % (url.show, proxyid) - sys.stderr.write("session-log: %s" % pastebinurl) + sys.stderr.write("pastebin session-log: %s\n" % pastebinurl) tr = config.pluginmanager.impname2plugin['terminalreporter'] del tr._tw.__dict__['write'] From commits-noreply at bitbucket.org Sat Oct 17 17:48:43 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sat, 17 Oct 2009 15:48:43 +0000 (UTC) Subject: [py-svn] py-trunk commit 78bfab622759: streamline pluginmanager api and test/beautify printing of plugins with --trace Message-ID: <20091017154843.19AC37EF50@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1255777019 -7200 # Node ID 78bfab622759667c10a1ac7f1f158104ede57da7 # Parent 647c31b19f6f38f577552aac66276915511af192 streamline pluginmanager api and test/beautify printing of plugins with --trace --- a/testing/pytest/plugin/test_pytest_terminal.py +++ b/testing/pytest/plugin/test_pytest_terminal.py @@ -613,3 +613,9 @@ class TestTerminalFunctional: ]) assert result.ret == 1 + def test_trace_reporting(self, testdir): + result = testdir.runpytest("--trace") + assert result.stdout.fnmatch_lines([ + "*active plugins*" + ]) + assert result.ret == 0 --- a/testing/pytest/plugin/test_pytest_pdb.py +++ b/testing/pytest/plugin/test_pytest_pdb.py @@ -6,7 +6,7 @@ class TestPDB: pdblist = [] def mypdb(*args): pdblist.append(args) - plugin = request.config.pluginmanager.impname2plugin['pytest_pdb'] + plugin = request.config.pluginmanager.getplugin('pdb') monkeypatch.setattr(plugin, 'post_mortem', mypdb) return pdblist --- a/testing/pytest/test_pluginmanager.py +++ b/testing/pytest/test_pluginmanager.py @@ -121,9 +121,10 @@ class TestBootstrapping: a1, a2 = A(), A() pp.register(a1) assert pp.isregistered(a1) - pp.register(a2) + pp.register(a2, "hello") assert pp.isregistered(a2) assert pp.getplugins() == [a1, a2] + assert pp.getplugin('hello') == a2 pp.unregister(a1) assert not pp.isregistered(a1) pp.unregister(a2) @@ -142,6 +143,15 @@ class TestBootstrapping: #assert not pp.isregistered(mod2) assert pp.getplugins() == [mod] # does not actually modify plugins + def 
test_canonical_import(self, monkeypatch): + mod = py.std.types.ModuleType("pytest_xyz") + monkeypatch.setitem(py.std.sys.modules, 'pytest_xyz', mod) + pp = PluginManager() + pp.import_plugin('xyz') + assert pp.getplugin('xyz') == mod + assert pp.getplugin('pytest_xyz') == mod + assert pp.isregistered(mod) + def test_register_mismatch_method(self): pp = PluginManager() class hello: --- a/_py/test/plugin/pytest_pastebin.py +++ b/_py/test/plugin/pytest_pastebin.py @@ -38,7 +38,7 @@ def pytest_configure(__multicall__, conf __multicall__.execute() if config.option.pastebin == "all": config._pastebinfile = tempfile.TemporaryFile('w+') - tr = config.pluginmanager.impname2plugin['terminalreporter'] + tr = config.pluginmanager.getplugin('terminalreporter') oldwrite = tr._tw.write def tee_write(s, **kwargs): oldwrite(s, **kwargs) @@ -54,7 +54,7 @@ def pytest_unconfigure(config): proxyid = getproxy().newPaste("python", sessionlog) pastebinurl = "%s%s" % (url.show, proxyid) sys.stderr.write("pastebin session-log: %s\n" % pastebinurl) - tr = config.pluginmanager.impname2plugin['terminalreporter'] + tr = config.pluginmanager.getplugin('terminalreporter') del tr._tw.__dict__['write'] def getproxy(): --- a/_py/test/plugin/pytest_terminal.py +++ b/_py/test/plugin/pytest_terminal.py @@ -236,14 +236,15 @@ class TerminalReporter: if self.config.option.debug or self.config.option.traceconfig: self.write_line("using py lib: %s" % (py.path.local(py.__file__).dirpath())) if self.config.option.traceconfig: + self.write_line("active plugins:") plugins = [] - for plugin in self.config.pluginmanager.comregistry: - name = getattr(plugin, '__name__', None) - if name is None: - name = plugin.__class__.__name__ - plugins.append(name) - plugins = ", ".join(plugins) - self.write_line("active plugins: %s" %(plugins,)) + items = self.config.pluginmanager._name2plugin.items() + for name, plugin in items: + repr_plugin = repr(plugin) + fullwidth = getattr(self._tw, 'fullwidth', sys.maxint) + if len(repr_plugin)+26 > fullwidth: + repr_plugin = repr_plugin[:(fullwidth-30)] + '...' 
+ self.write_line(" %-20s: %s" %(name, repr_plugin)) for i, testarg in enumerate(self.config.args): self.write_line("test object %d: %s" %(i+1, testarg)) --- a/_py/test/plugin/pytest_pdb.py +++ b/_py/test/plugin/pytest_pdb.py @@ -28,8 +28,8 @@ def pytest_configure(config): class PdbInvoke: def pytest_runtest_makereport(self, item, call): if call.excinfo and not call.excinfo.errisinstance(Skipped): - # XXX hack hack hack to play well with capturing - capman = item.config.pluginmanager.impname2plugin['capturemanager'] + # play well with capturing, slightly hackish + capman = item.config.pluginmanager.getplugin('capturemanager') capman.suspendcapture() tw = py.io.TerminalWriter() @@ -37,7 +37,6 @@ class PdbInvoke: repr.toterminal(tw) post_mortem(call.excinfo._excinfo[2]) - # XXX hack end capman.resumecapture_item(item) class Pdb(py.std.pdb.Pdb): --- a/_py/test/plugin/pytest_pytester.py +++ b/_py/test/plugin/pytest_pytester.py @@ -189,7 +189,6 @@ class TmpTestdir: plugin = PseudoPlugin(plugin) if not config.pluginmanager.isregistered(plugin): config.pluginmanager.register(plugin) - #print "config.pluginmanager.impname2plugin", config.pluginmanager.impname2plugin return config def parseconfig(self, *args): --- a/testing/pytest/plugin/test_pytest_pastebin.py +++ b/testing/pytest/plugin/test_pytest_pastebin.py @@ -6,7 +6,7 @@ class TestPasting: class MockProxy: def newPaste(self, language, code): pastebinlist.append((language, code)) - plugin = request.config.pluginmanager.impname2plugin['pytest_pastebin'] + plugin = request.config.pluginmanager.getplugin('pastebin') mp.setattr(plugin, 'getproxy', MockProxy) return pastebinlist --- a/_py/test/pluginmanager.py +++ b/_py/test/pluginmanager.py @@ -16,7 +16,7 @@ class PluginManager(object): if comregistry is None: comregistry = py._com.Registry() self.comregistry = comregistry - self.impname2plugin = {} + self._name2plugin = {} self.hook = py._com.HookRelay( hookspecs=hookspec, @@ -33,9 +33,9 @@ class PluginManager(object): def register(self, plugin, name=None): assert not self.isregistered(plugin) name = self._getpluginname(plugin, name) - if name in self.impname2plugin: + if name in self._name2plugin: return False - self.impname2plugin[name] = plugin + self._name2plugin[name] = plugin self.hook.pytest_plugin_registered(plugin=plugin) self._checkplugin(plugin) self.comregistry.register(plugin) @@ -44,19 +44,26 @@ class PluginManager(object): def unregister(self, plugin): self.hook.pytest_plugin_unregistered(plugin=plugin) self.comregistry.unregister(plugin) - for name, value in list(self.impname2plugin.items()): + for name, value in list(self._name2plugin.items()): if value == plugin: - del self.impname2plugin[name] + del self._name2plugin[name] def isregistered(self, plugin, name=None): - return self._getpluginname(plugin, name) in self.impname2plugin + if self._getpluginname(plugin, name) in self._name2plugin: + return True + for val in self._name2plugin.values(): + if plugin == val: + return True def getplugins(self): return list(self.comregistry) - def getplugin(self, importname): - impname = canonical_importname(importname) - return self.impname2plugin[impname] + def getplugin(self, name): + try: + return self._name2plugin[name] + except KeyError: + impname = canonical_importname(name) + return self._name2plugin[impname] # API for bootstrapping # @@ -94,7 +101,7 @@ class PluginManager(object): def import_plugin(self, spec): assert isinstance(spec, str) modname = canonical_importname(spec) - if modname in self.impname2plugin: + if modname in 
self._name2plugin: return try: mod = importplugin(modname) From commits-noreply at bitbucket.org Sat Oct 17 17:48:45 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sat, 17 Oct 2009 15:48:45 +0000 (UTC) Subject: [py-svn] py-trunk commit 201d49c4a1f7: add --report cmdline option, shift refined xfailed and skipped reporting to skipping plugin Message-ID: <20091017154845.516057EF55@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1255794160 -7200 # Node ID 201d49c4a1f7599904b959bad0787a06c46b7199 # Parent 78bfab622759667c10a1ac7f1f158104ede57da7 add --report cmdline option, shift refined xfailed and skipped reporting to skipping plugin --- a/testing/pytest/plugin/test_pytest_terminal.py +++ b/testing/pytest/plugin/test_pytest_terminal.py @@ -14,7 +14,7 @@ except ImportError: # =============================================================================== from _py.test.plugin.pytest_terminal import TerminalReporter, \ - CollectonlyReporter, repr_pythonversion, folded_skips + CollectonlyReporter, repr_pythonversion, getreportopt from _py.test.plugin import pytest_runner as runner def basic_run_report(item): @@ -289,28 +289,6 @@ class TestTerminal: ]) result.stdout.fnmatch_lines(['*KEYBOARD INTERRUPT*']) - def test_skip_reasons_folding(self): - class longrepr: - class reprcrash: - path = 'xyz' - lineno = 3 - message = "justso" - - ev1 = runner.CollectReport(None, None) - ev1.when = "execute" - ev1.skipped = True - ev1.longrepr = longrepr - - ev2 = runner.ItemTestReport(None, excinfo=longrepr) - ev2.skipped = True - - l = folded_skips([ev1, ev2]) - assert len(l) == 1 - num, fspath, lineno, reason = l[0] - assert num == 2 - assert fspath == longrepr.reprcrash.path - assert lineno == longrepr.reprcrash.lineno - assert reason == longrepr.reprcrash.message class TestCollectonly: def test_collectonly_basic(self, testdir, linecomp): @@ -473,37 +451,6 @@ class TestFixtureReporting: ]) class TestTerminalFunctional: - def test_skipped_reasons(self, testdir): - testdir.makepyfile( - test_one=""" - from conftest import doskip - def setup_function(func): - doskip() - def test_func(): - pass - class TestClass: - def test_method(self): - doskip() - """, - test_two = """ - from conftest import doskip - doskip() - """, - conftest = """ - import py - def doskip(): - py.test.skip('test') - """ - ) - result = testdir.runpytest() - extra = result.stdout.fnmatch_lines([ - "*test_one.py ss", - "*test_two.py S", - "___* skipped test summary *_", - "*conftest.py:3: *3* Skipped: 'test'", - ]) - assert result.ret == 0 - def test_deselected(self, testdir): testpath = testdir.makepyfile(""" def test_one(): @@ -613,6 +560,27 @@ class TestTerminalFunctional: ]) assert result.ret == 1 + +def test_getreportopt(): + assert getreportopt(None) == {} + assert getreportopt("hello") == {'hello': True} + assert getreportopt("hello, world") == dict(hello=True, world=True) + assert getreportopt("nohello") == dict(hello=False) + +def test_terminalreporter_reportopt_conftestsetting(testdir): + testdir.makeconftest("option_report = 'skipped'") + p = testdir.makepyfile(""" + def pytest_funcarg__tr(request): + tr = request.config.pluginmanager.getplugin("terminalreporter") + return tr + def test_opt(tr): + assert tr.hasopt('skipped') + assert not tr.hasopt('qwe') + """) + result = testdir.runpytest() + assert result.stdout.fnmatch_lines([ + "*1 passed*" + ]) def test_trace_reporting(self, testdir): 
result = testdir.runpytest("--trace") assert result.stdout.fnmatch_lines([ --- a/doc/changelog.txt +++ b/doc/changelog.txt @@ -1,6 +1,8 @@ Changes between 1.0.2 and '1.1.0b1' ===================================== +* add --report command line option that allows to control showing of skipped/xfailed sections + * generalized skipping: a new way to mark python functions with skipif or xfail at function, class and modules level based on platform or sys-module attributes. --- a/_py/test/plugin/pytest_terminal.py +++ b/_py/test/plugin/pytest_terminal.py @@ -23,6 +23,10 @@ def pytest_addoption(parser): group.addoption('--debug', action="store_true", dest="debug", default=False, help="generate and show debugging information.") + group.addoption('--report', + action="store", dest="report", default=None, metavar="opts", + help="comma separated reporting options") + def pytest_configure(config): if config.option.collectonly: @@ -38,6 +42,18 @@ def pytest_configure(config): setattr(reporter._tw, name, getattr(config, attr)) config.pluginmanager.register(reporter, 'terminalreporter') +def getreportopt(optvalue): + d = {} + if optvalue: + for setting in optvalue.split(","): + setting = setting.strip() + val = True + if setting.startswith("no"): + val = False + setting = setting[2:] + d[setting] = val + return d + class TerminalReporter: def __init__(self, config, file=None): self.config = config @@ -48,6 +64,10 @@ class TerminalReporter: self._tw = py.io.TerminalWriter(file) self.currentfspath = None self.gateway2info = {} + self._reportopt = getreportopt(config.getvalue('report')) + + def hasopt(self, name): + return self._reportopt.get(name, False) def write_fspath_result(self, fspath, res): fspath = self.curdir.bestrelpath(fspath) @@ -254,7 +274,6 @@ class TerminalReporter: if exitstatus in (0, 1, 2): self.summary_errors() self.summary_failures() - self.summary_skips() self.config.hook.pytest_terminal_summary(terminalreporter=self) if exitstatus == 2: self._report_keyboardinterrupt() @@ -389,14 +408,6 @@ class TerminalReporter: self.write_sep("=", "%d tests deselected by %r" %( len(self.stats['deselected']), self.config.option.keyword), bold=True) - def summary_skips(self): - if 'skipped' in self.stats: - if 'failed' not in self.stats: # or self.config.option.showskipsummary: - fskips = folded_skips(self.stats['skipped']) - if fskips: - self.write_sep("_", "skipped test summary") - for num, fspath, lineno, reason in fskips: - self._tw.line("%s:%d: [%d] %s" %(fspath, lineno, num, reason)) class CollectonlyReporter: INDENT = " " @@ -435,16 +446,6 @@ class CollectonlyReporter: for rep in self._failed: rep.toterminal(self.out) -def folded_skips(skipped): - d = {} - for event in skipped: - entry = event.longrepr.reprcrash - key = entry.path, entry.lineno, entry.message - d.setdefault(key, []).append(event) - l = [] - for key, events in d.items(): - l.append((len(events),) + key) - return l def repr_pythonversion(v=None): if v is None: --- a/_py/test/plugin/pytest_skipping.py +++ b/_py/test/plugin/pytest_skipping.py @@ -132,9 +132,19 @@ def pytest_report_teststatus(report): # called by the terminalreporter instance/plugin def pytest_terminal_summary(terminalreporter): + show_xfailed(terminalreporter) + show_skipped(terminalreporter) + +def show_xfailed(terminalreporter): tr = terminalreporter xfailed = tr.stats.get("xfailed") if xfailed: + if not tr.hasopt('xfailed'): + if tr.config.getvalue("verbose"): + tr.write_line( + "%d expected failures, use --report=xfailed for more info" % + len(xfailed)) + return 
tr.write_sep("_", "expected failures") for rep in xfailed: entry = rep.longrepr.reprcrash @@ -178,3 +188,29 @@ def evalexpression(item, keyword): result = expr return expr, result +def folded_skips(skipped): + d = {} + for event in skipped: + entry = event.longrepr.reprcrash + key = entry.path, entry.lineno, entry.message + d.setdefault(key, []).append(event) + l = [] + for key, events in d.items(): + l.append((len(events),) + key) + return l + +def show_skipped(terminalreporter): + tr = terminalreporter + skipped = tr.stats.get('skipped', []) + if skipped: + if not tr.hasopt('skipped'): + if tr.config.getvalue("verbose"): + tr.write_line( + "%d skipped tests, use --report=skipped for more info" % + len(skipped)) + return + fskips = folded_skips(skipped) + if fskips: + tr.write_sep("_", "skipped test summary") + for num, fspath, lineno, reason in fskips: + tr._tw.line("%s:%d: [%d] %s" %(fspath, lineno, num, reason)) --- a/testing/pytest/plugin/test_pytest_skipping.py +++ b/testing/pytest/plugin/test_pytest_skipping.py @@ -1,5 +1,28 @@ import py +def test_xfail_not_report_default(testdir): + p = testdir.makepyfile(test_one=""" + import py + @py.test.mark.xfail + def test_this(): + assert 0 + """) + result = testdir.runpytest(p, '-v') + extra = result.stdout.fnmatch_lines([ + "*1 expected failures*--report=xfailed*", + ]) + +def test_skip_not_report_default(testdir): + p = testdir.makepyfile(test_one=""" + import py + def test_this(): + py.test.skip("hello") + """) + result = testdir.runpytest(p, '-v') + extra = result.stdout.fnmatch_lines([ + "*1 skipped*--report=skipped*", + ]) + def test_xfail_decorator(testdir): p = testdir.makepyfile(test_one=""" import py @@ -11,7 +34,7 @@ def test_xfail_decorator(testdir): def test_that(): assert 1 """) - result = testdir.runpytest(p) + result = testdir.runpytest(p, '--report=xfailed') extra = result.stdout.fnmatch_lines([ "*expected failures*", "*test_one.test_this*test_one.py:4*", @@ -28,7 +51,7 @@ def test_xfail_at_module(testdir): def test_intentional_xfail(): assert 0 """) - result = testdir.runpytest(p) + result = testdir.runpytest(p, '--report=xfailed') extra = result.stdout.fnmatch_lines([ "*expected failures*", "*test_intentional_xfail*:4*", @@ -43,7 +66,7 @@ def test_skipif_decorator(testdir): def test_that(): assert 0 """) - result = testdir.runpytest(p) + result = testdir.runpytest(p, '--report=skipped') extra = result.stdout.fnmatch_lines([ "*Skipped*platform*", "*1 skipped*" @@ -99,3 +122,60 @@ def test_evalexpression_cls_config_examp x, y = evalexpression(item, 'skipif') assert x == 'config._hackxyz' assert y == 3 + +def test_skip_reasons_folding(): + from _py.test.plugin import pytest_runner as runner + from _py.test.plugin.pytest_skipping import folded_skips + class longrepr: + class reprcrash: + path = 'xyz' + lineno = 3 + message = "justso" + + ev1 = runner.CollectReport(None, None) + ev1.when = "execute" + ev1.skipped = True + ev1.longrepr = longrepr + + ev2 = runner.ItemTestReport(None, excinfo=longrepr) + ev2.skipped = True + + l = folded_skips([ev1, ev2]) + assert len(l) == 1 + num, fspath, lineno, reason = l[0] + assert num == 2 + assert fspath == longrepr.reprcrash.path + assert lineno == longrepr.reprcrash.lineno + assert reason == longrepr.reprcrash.message + +def test_skipped_reasons_functional(testdir): + testdir.makepyfile( + test_one=""" + from conftest import doskip + def setup_function(func): + doskip() + def test_func(): + pass + class TestClass: + def test_method(self): + doskip() + """, + test_two = """ + from 
conftest import doskip + doskip() + """, + conftest = """ + import py + def doskip(): + py.test.skip('test') + """ + ) + result = testdir.runpytest('--report=skipped') + extra = result.stdout.fnmatch_lines([ + "*test_one.py ss", + "*test_two.py S", + "___* skipped test summary *_", + "*conftest.py:3: *3* Skipped: 'test'", + ]) + assert result.ret == 0 + From commits-noreply at bitbucket.org Sat Oct 17 17:48:47 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sat, 17 Oct 2009 15:48:47 +0000 (UTC) Subject: [py-svn] py-trunk commit a8c60fc98422: deprecate addgroup / allow ordering of option groups Message-ID: <20091017154847.250A37EF53@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1255794213 -7200 # Node ID a8c60fc9842288e4be3c75ca2a9ad31fdbe65288 # Parent 201d49c4a1f7599904b959bad0787a06c46b7199 deprecate addgroup / allow ordering of option groups --- a/_py/test/plugin/pytest_doctest.py +++ b/_py/test/plugin/pytest_doctest.py @@ -18,7 +18,7 @@ from _py.code.code import TerminalRepr, import doctest def pytest_addoption(parser): - group = parser.addgroup("doctest options") + group = parser.getgroup("doctest options") group.addoption("--doctest-modules", action="store_true", default=False, help="search all python files for doctests", --- a/conftest.py +++ b/conftest.py @@ -4,7 +4,7 @@ rsyncdirs = ['conftest.py', 'py', 'doc', import py def pytest_addoption(parser): - group = parser.addgroup("pylib", "py lib testing options") + group = parser.getgroup("pylib", "py lib testing options") group.addoption('--sshhost', action="store", dest="sshhost", default=None, help=("ssh xspec for ssh functional tests. ")) --- a/_py/test/plugin/pytest_default.py +++ b/_py/test/plugin/pytest_default.py @@ -73,7 +73,8 @@ def pytest_addoption(parser): action="store_true", dest="looponfail", default=False, help="run tests, re-run failing test set until all pass.") - group = parser.addgroup("debugconfig", "test process debugging and configuration") + group = parser.getgroup("debugconfig", + "test process debugging and configuration") group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir", help="base temporary directory for this test run.") @@ -84,7 +85,8 @@ def pytest_addoption(parser): "execnet missing: --looponfailing and distributed testing not available.") def add_dist_options(parser): - group = parser.addgroup("dist", "distributed testing") # see http://pytest.org/help/dist") + # see http://pytest.org/help/dist") + group = parser.getgroup("dist", "distributed testing") group._addoption('--dist', metavar="distmode", action="store", choices=['load', 'each', 'no'], type="choice", dest="dist", default="no", --- a/testing/pytest/test_parseopt.py +++ b/testing/pytest/test_parseopt.py @@ -19,11 +19,15 @@ class TestParser: group = parser.addgroup("hello", description="desc") assert group.name == "hello" assert group.description == "desc" - py.test.raises(ValueError, parser.addgroup, "hello") + + def test_addgroup_deprecation(self, recwarn): + parser = parseopt.Parser() + group = parser.addgroup("hello", description="desc") + assert recwarn.pop() group2 = parser.getgroup("hello") - assert group2 is group + assert group == group2 - def test_getgroup_addsgroup(self): + def test_getgroup_simple(self): parser = parseopt.Parser() group = parser.getgroup("hello", description="desc") assert group.name == "hello" @@ -31,6 +35,14 @@ class TestParser: group2 = 
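# aside, not part of the patch: sketch of the get-or-create behaviour and of
# the new "after" keyword for ordering option groups (import assumed to match
# the test module above):
from _py.test import parseopt

parser = parseopt.Parser()
general = parser.getgroup("general")               # creates the group
assert parser.getgroup("general") is general       # second call returns the same one
parser.getgroup("debugconfig", "debugging options")
parser.getgroup("report", after="general")         # inserted right after "general"
assert [g.name for g in parser._groups] == ["general", "report", "debugconfig"]
# parser.addgroup(...) still works but now emits a deprecation warning and
# simply delegates to getgroup().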
parser.getgroup("hello") assert group2 is group + def test_group_ordering(self): + parser = parseopt.Parser() + group0 = parser.getgroup("1") + group1 = parser.getgroup("2") + group1 = parser.getgroup("3", after="1") + groups = parser._groups + groups_names = [x.name for x in groups] + assert groups_names == list("132") def test_group_addoption(self): group = parseopt.OptionGroup("hello") --- a/_py/test/parseopt.py +++ b/_py/test/parseopt.py @@ -21,7 +21,7 @@ class Parser: def __init__(self, usage=None, processopt=None): self._anonymous = OptionGroup("custom options", parser=self) - self._groups = [self._anonymous] + self._groups = [] self._processopt = processopt self._usage = usage self.epilog = "" @@ -34,19 +34,21 @@ class Parser: def addnote(self, note): self._notes.append(note) - def addgroup(self, name, description=""): - for group in self._groups: - if group.name == name: - raise ValueError("group %r already exists" % name) - group = OptionGroup(name, description, parser=self) - self._groups.append(group) - return group - - def getgroup(self, name, description=""): + def getgroup(self, name, description="", after=None): for group in self._groups: if group.name == name: return group - return self.addgroup(name, description) + group = OptionGroup(name, description, parser=self) + i = 0 + for i, grp in enumerate(self._groups): + if grp.name == after: + break + self._groups.insert(i+1, group) + return group + + def addgroup(self, name, description=""): + py.log._apiwarn("1.1", "use getgroup() which gets-or-creates") + return self.getgroup(name, description) def addoption(self, *opts, **attrs): """ add an optparse-style option. """ @@ -56,7 +58,7 @@ class Parser: optparser = optparse.OptionParser(usage=self._usage) # make sure anaonymous group is at the end optparser.epilog = self.epilog - groups = self._groups[1:] + [self._groups[0]] + groups = self._groups + [self._anonymous] for group in groups: if group.options: desc = group.description or group.name --- a/doc/changelog.txt +++ b/doc/changelog.txt @@ -1,6 +1,8 @@ Changes between 1.0.2 and '1.1.0b1' ===================================== +* deprecate parser.addgroup in favour of getgroup which creates option group + * add --report command line option that allows to control showing of skipped/xfailed sections * generalized skipping: a new way to mark python functions with skipif or xfail From commits-noreply at bitbucket.org Sun Oct 18 20:50:04 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 18 Oct 2009 18:50:04 +0000 (UTC) Subject: [py-svn] py-trunk commit 0d98a3be27c4: flush looponfail output to get around line-buffering Message-ID: <20091018185004.E5C4A7EEFD@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User Ronny Pfannschmidt # Date 1255897818 -7200 # Node ID 0d98a3be27c4c3234a3771a5b23c3c58a39e3003 # Parent a416dddd664e20bb15fa5a03eb16887c1b4cefe0 flush looponfail output to get around line-buffering --- a/_py/test/looponfail/remote.py +++ b/_py/test/looponfail/remote.py @@ -79,7 +79,10 @@ class RemoteControl(object): slave_runsession(channel, config, fullwidth, hasmarkup) """) remote_outchannel = channel.receive() - remote_outchannel.setcallback(out._file.write) + def write(s): + out._file.write(s) + out._file.flush() + remote_outchannel.setcallback(write) channel = self.channel = PickleChannel(channel) channel.send((self.config, out.fullwidth, out.hasmarkup)) self.trace("set up of slave session complete") From 
commits-noreply at bitbucket.org Tue Oct 20 16:42:42 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 20 Oct 2009 14:42:42 +0000 (UTC) Subject: [py-svn] py-trunk commit d9645744d8a5: player nicer for missing parent Module objects for a collected function (bug triggered by oejskit) Message-ID: <20091020144242.C42E27EF68@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256049492 -7200 # Node ID d9645744d8a5a9f9fefe55698d2513ab966461e3 # Parent 0d98a3be27c4c3234a3771a5b23c3c58a39e3003 player nicer for missing parent Module objects for a collected function (bug triggered by oejskit) --- a/_py/test/plugin/pytest_skipping.py +++ b/_py/test/plugin/pytest_skipping.py @@ -175,7 +175,8 @@ def getexpression(item, keyword): if cls and hasattr(cls.obj, keyword): return getattr(cls.obj, keyword) mod = item.getparent(py.test.collect.Module) - return getattr(mod.obj, keyword, None) + if mod: + return getattr(mod.obj, keyword, None) def evalexpression(item, keyword): expr = getexpression(item, keyword) --- a/testing/pytest/plugin/test_pytest_skipping.py +++ b/testing/pytest/plugin/test_pytest_skipping.py @@ -110,6 +110,9 @@ def test_getexpression(testdir): assert getexpression(item2, 'just') + item2.parent = None + assert not getexpression(item2, 'nada') + def test_evalexpression_cls_config_example(testdir): from _py.test.plugin.pytest_skipping import evalexpression item, = testdir.getitems(""" From commits-noreply at bitbucket.org Thu Oct 22 18:38:22 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 22 Oct 2009 16:38:22 +0000 (UTC) Subject: [py-svn] py-trunk commit 5b34fb114c30: cleanup: move creation of python colitems to a default pytest_pycollect_makeitem hook impl Message-ID: <20091022163822.ABE5C7EF49@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256143360 -7200 # Node ID 5b34fb114c30bbfd6d6c3c8674136ac04897223a # Parent d9645744d8a5a9f9fefe55698d2513ab966461e3 cleanup: move creation of python colitems to a default pytest_pycollect_makeitem hook impl --- a/_py/test/pycollect.py +++ b/_py/test/pycollect.py @@ -120,24 +120,8 @@ class PyCollectorMixin(PyobjMixin, py.te return self.join(name) def makeitem(self, name, obj): - res = self.config.hook.pytest_pycollect_makeitem( + return self.config.hook.pytest_pycollect_makeitem( collector=self, name=name, obj=obj) - if res is not None: - return res - if self._istestclasscandidate(name, obj): - res = self._deprecated_join(name) - if res is not None: - return res - return self.Class(name, parent=self) - elif self.funcnamefilter(name) and hasattr(obj, '__call__'): - res = self._deprecated_join(name) - if res is not None: - return res - if is_generator(obj): - # XXX deprecation warning - return self.Generator(name, parent=self) - else: - return self._genfunctions(name, obj) def _istestclasscandidate(self, name, obj): if self.classnamefilter(name) and \ @@ -146,7 +130,6 @@ class PyCollectorMixin(PyobjMixin, py.te # XXX WARN return False return True - def _genfunctions(self, name, funcobj): module = self.getparent(Module).obj @@ -162,12 +145,6 @@ class PyCollectorMixin(PyobjMixin, py.te return funcargs.FunctionCollector(name=name, parent=self, calls=metafunc._calls) -def is_generator(func): - try: - return py.code.getrawcode(func).co_flags & 32 # generator function - except 
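# aside, not part of the patch: with makeitem() now delegating to the
# pytest_pycollect_makeitem hook, a conftest.py or plugin can take part in
# python collection without subclassing.  A hypothetical example (the
# "check_" prefix is invented for this sketch):
import py

def pytest_pycollect_makeitem(collector, name, obj):
    if name.startswith("check_") and hasattr(obj, '__call__'):
        return py.test.collect.Function(name, parent=collector)
    # returning None lets the default implementation added in this changeset
    # (in pytest_default.py, via __multicall__.execute()) handle the object.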
AttributeError: # builtin functions have no bytecode - # assume them to not be generators - return False class Module(py.test.collect.File, PyCollectorMixin): def _getobj(self): --- a/_py/test/plugin/pytest_default.py +++ b/_py/test/plugin/pytest_default.py @@ -119,3 +119,31 @@ def setsession(config): elif val("dist") != "no": from _py.test.dist.dsession import DSession config.setsessionclass(DSession) + +# pycollect related hooks and code, should move to pytest_pycollect.py + +def pytest_pycollect_makeitem(__multicall__, collector, name, obj): + res = __multicall__.execute() + if res is not None: + return res + if collector._istestclasscandidate(name, obj): + res = collector._deprecated_join(name) + if res is not None: + return res + return collector.Class(name, parent=collector) + elif collector.funcnamefilter(name) and hasattr(obj, '__call__'): + res = collector._deprecated_join(name) + if res is not None: + return res + if is_generator(obj): + # XXX deprecation warning + return collector.Generator(name, parent=collector) + else: + return collector._genfunctions(name, obj) + +def is_generator(func): + try: + return py.code.getrawcode(func).co_flags & 32 # generator function + except AttributeError: # builtin functions have no bytecode + # assume them to not be generators + return False --- a/_py/test/plugin/pytest_unittest.py +++ b/_py/test/plugin/pytest_unittest.py @@ -18,7 +18,7 @@ import sys def pytest_pycollect_makeitem(collector, name, obj): if 'unittest' not in sys.modules: - return # nobody could have possibly derived a subclass + return # nobody derived unittest.TestCase try: isunit = issubclass(obj, py.std.unittest.TestCase) except TypeError: From commits-noreply at bitbucket.org Thu Oct 22 18:38:24 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 22 Oct 2009 16:38:24 +0000 (UTC) Subject: [py-svn] py-trunk commit 4fcdb32e0b80: don't visit '_' attributes on python objects for calling hooks Message-ID: <20091022163824.890897EF4D@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256143452 -7200 # Node ID 4fcdb32e0b809d99d8d6f9cfb6485210c670a1d9 # Parent 5b34fb114c30bbfd6d6c3c8674136ac04897223a don't visit '_' attributes on python objects for calling hooks --- a/testing/pytest/test_pycollect.py +++ b/testing/pytest/test_pycollect.py @@ -389,6 +389,14 @@ class TestConftestCustomization: assert len(colitems) == 1 assert colitems[0].name == "check_method" + def test_makeitem_non_underscore(self, testdir, monkeypatch): + modcol = testdir.getmodulecol("def _hello(): pass") + l = [] + monkeypatch.setattr(py.test.collect.Module, 'makeitem', + lambda self, name, obj: l.append(name)) + modcol._buildname2items() + assert '_hello' not in l + class TestReportinfo: --- a/_py/test/pycollect.py +++ b/_py/test/pycollect.py @@ -109,9 +109,10 @@ class PyCollectorMixin(PyobjMixin, py.te if name in seen: continue seen[name] = True - res = self.makeitem(name, obj) - if res is not None: - d[name] = res + if name[0] != "_": + res = self.makeitem(name, obj) + if res is not None: + d[name] = res return d def _deprecated_join(self, name): From commits-noreply at bitbucket.org Thu Oct 22 18:38:24 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 22 Oct 2009 16:38:24 +0000 (UTC) Subject: [py-svn] py-trunk commit fca2db12253a: extend and refine test marking Message-ID: <20091022163824.9C0FE7EF4C@bitbucket.org> # HG changeset 
patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256217718 -7200 # Node ID fca2db12253a13bb7eef5b4424a0befc34b50f49 # Parent 4fcdb32e0b809d99d8d6f9cfb6485210c670a1d9 extend and refine test marking - allow to mark tests via a "pytestmark" name at class/module level. - make combined positional args of marker calls available via an _args argument --- a/testing/pytest/plugin/test_pytest_keyword.py +++ b/testing/pytest/plugin/test_pytest_keyword.py @@ -1,30 +1,105 @@ import py from _py.test.plugin.pytest_keyword import Mark -def test_pytest_mark_api(): - mark = Mark() - py.test.raises(TypeError, "mark(x=3)") +class TestMark: + def test_pytest_mark_notcallable(self): + mark = Mark() + py.test.raises(TypeError, "mark()") - def f(): pass - mark.hello(f) - assert f.hello + def test_pytest_mark_bare(self): + mark = Mark() + def f(): pass + mark.hello(f) + assert f.hello - mark.world(x=3, y=4)(f) - assert f.world - assert f.world.x == 3 - assert f.world.y == 4 + def test_pytest_mark_keywords(self): + mark = Mark() + def f(): pass + mark.world(x=3, y=4)(f) + assert f.world + assert f.world.x == 3 + assert f.world.y == 4 - mark.world("hello")(f) - assert f.world._0 == "hello" + def test_apply_multiple_and_merge(self): + mark = Mark() + def f(): pass + marker = mark.world + mark.world(x=3)(f) + assert f.world.x == 3 + mark.world(y=4)(f) + assert f.world.x == 3 + assert f.world.y == 4 + mark.world(y=1)(f) + assert f.world.y == 1 + assert len(f.world._args) == 0 - py.test.raises(TypeError, "mark.some(x=3)(f=5)") + def test_pytest_mark_positional(self): + mark = Mark() + def f(): pass + mark.world("hello")(f) + assert f.world._args[0] == "hello" + mark.world("world")(f) -def test_mark_plugin(testdir): - p = testdir.makepyfile(""" - import py - @py.test.mark.hello - def test_hello(): - assert hasattr(test_hello, 'hello') - """) - result = testdir.runpytest(p) - assert result.stdout.fnmatch_lines(["*passed*"]) +class TestFunctional: + def test_mark_per_function(self, testdir): + p = testdir.makepyfile(""" + import py + @py.test.mark.hello + def test_hello(): + assert hasattr(test_hello, 'hello') + """) + result = testdir.runpytest(p) + assert result.stdout.fnmatch_lines(["*passed*"]) + + def test_mark_per_module(self, testdir): + item = testdir.getitem(""" + import py + pytestmark = py.test.mark.hello + def test_func(): + pass + """) + keywords = item.readkeywords() + assert 'hello' in keywords + + def test_mark_per_class(self, testdir): + modcol = testdir.getmodulecol(""" + import py + class TestClass: + pytestmark = py.test.mark.hello + def test_func(self): + assert TestClass.test_func.hello + """) + clscol = modcol.collect()[0] + item = clscol.collect()[0].collect()[0] + keywords = item.readkeywords() + assert 'hello' in keywords + + def test_merging_markers(self, testdir): + p = testdir.makepyfile(""" + import py + pytestmark = py.test.mark.hello("pos1", x=1, y=2) + class TestClass: + # classlevel overrides module level + pytestmark = py.test.mark.hello(x=3) + @py.test.mark.hello("pos0", z=4) + def test_func(self): + pass + """) + items, rec = testdir.inline_genitems(p) + item, = items + keywords = item.readkeywords() + marker = keywords['hello'] + assert marker._args == ["pos0", "pos1"] + assert marker.x == 3 + assert marker.y == 2 + assert marker.z == 4 + + def test_mark_other(self, testdir): + item = testdir.getitem(""" + import py + class pytestmark: + pass + def test_func(): + pass + """) + keywords = item.readkeywords() --- 
a/_py/test/plugin/pytest_keyword.py +++ b/_py/test/plugin/pytest_keyword.py @@ -1,7 +1,7 @@ """ mark test functions with keywords that may hold values. -Marking functions and setting rich attributes +Marking functions by a decorator ---------------------------------------------------- By default, all filename parts and class/function names of a test @@ -30,8 +30,29 @@ In addition to keyword arguments you can def test_receive(): ... -after which ``test_receive.webtest._1 == 'triangular`` hold true. +after which ``test_receive.webtest._args[0] == 'triangular`` holds true. + +Marking classes or modules +---------------------------------------------------- + +To mark all methods of a class you can set a class-level attribute:: + + class TestClass: + pytestmark = py.test.mark.webtest + +the marker function will be applied to all test methods. + +If you set a marker it inside a test module like this:: + + pytestmark = py.test.mark.webtest + +the marker will be applied to all functions and methods of +that module. The module marker is applied last. + +Outer ``pytestmark`` keywords will overwrite inner keyword +values. Positional arguments are all appeneded to the +same '_args' list. """ import py @@ -49,6 +70,8 @@ class MarkerDecorator: """ decorator for setting function attributes. """ def __init__(self, name): self.markname = name + self.kwargs = {} + self.args = [] def __repr__(self): d = self.__dict__.copy() @@ -57,19 +80,41 @@ class MarkerDecorator: def __call__(self, *args, **kwargs): if args: - if hasattr(args[0], '__call__'): + if len(args) == 1 and hasattr(args[0], '__call__'): func = args[0] - mh = MarkHolder(getattr(self, 'kwargs', {})) - setattr(func, self.markname, mh) + holder = getattr(func, self.markname, None) + if holder is None: + holder = MarkHolder(self.markname, self.args, self.kwargs) + setattr(func, self.markname, holder) + else: + holder.__dict__.update(self.kwargs) + holder._args.extend(self.args) return func - # not a function so we memorize all args/kwargs settings - for i, arg in enumerate(args): - kwargs["_" + str(i)] = arg - if hasattr(self, 'kwargs'): - raise TypeError("double mark-keywords?") - self.kwargs = kwargs.copy() + else: + self.args.extend(args) + self.kwargs.update(kwargs) return self class MarkHolder: - def __init__(self, kwargs): + def __init__(self, name, args, kwargs): + self._name = name + self._args = args + self._kwargs = kwargs self.__dict__.update(kwargs) + + def __repr__(self): + return "" % ( + self._name, self._args, self._kwargs) + + +def pytest_pycollect_makeitem(__multicall__, collector, name, obj): + item = __multicall__.execute() + if isinstance(item, py.test.collect.Function): + cls = collector.getparent(py.test.collect.Class) + mod = collector.getparent(py.test.collect.Module) + func = getattr(item.obj, 'im_func', item.obj) + for parent in [x for x in (mod, cls) if x]: + marker = getattr(parent.obj, 'pytestmark', None) + if isinstance(marker, MarkerDecorator): + marker(func) + return item From commits-noreply at bitbucket.org Thu Oct 22 18:38:26 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 22 Oct 2009 16:38:26 +0000 (UTC) Subject: [py-svn] py-trunk commit 57132bbe20c5: use new marking idioms, simplify generalized skipping implementation Message-ID: <20091022163826.9F8B27EF4E@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256229444 -7200 # Node ID 57132bbe20c575c34de83f196f1006cceaa594b8 
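# aside, not part of the patch: the merging behaviour of the reworked
# MarkerDecorator/MarkHolder in plain code, mirroring what the collection hook
# does when module- and class-level "pytestmark" declarations exist ("webtest"
# is an arbitrary marker name chosen for the sketch):
import py

@py.test.mark.webtest("pos0", z=4)
def test_send():
    pass

py.test.mark.webtest("pos1", x=1, y=2)(test_send)   # module-level mark, applied first
py.test.mark.webtest(x=3)(test_send)                # class-level mark, applied last
assert test_send.webtest._args == ["pos0", "pos1"]
assert (test_send.webtest.x, test_send.webtest.y, test_send.webtest.z) == (3, 2, 4)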
# Parent fca2db12253a13bb7eef5b4424a0befc34b50f49 use new marking idioms, simplify generalized skipping implementation --- a/testing/pytest/plugin/test_pytest_runner.py +++ b/testing/pytest/plugin/test_pytest_runner.py @@ -218,7 +218,8 @@ class TestExecutionNonForked(BaseFunctio py.test.fail("did not raise") class TestExecutionForked(BaseFunctionalTests): - skipif = "not hasattr(os, 'fork')" + pytestmark = py.test.mark.skipif("not hasattr(os, 'fork')") + def getrunner(self): return runner.forked_run_report --- a/conftest.py +++ b/conftest.py @@ -52,7 +52,7 @@ def pytest_generate_tests(metafunc): multi = getattr(metafunc.function, 'multi', None) if multi is None: return - assert len(multi.__dict__) == 1 - for name, l in multi.__dict__.items(): + assert len(multi._kwargs) == 1 + for name, l in multi._kwargs.items(): for val in l: metafunc.addcall(funcargs={name: val}) --- a/_py/test/plugin/pytest_keyword.py +++ b/_py/test/plugin/pytest_keyword.py @@ -33,6 +33,8 @@ In addition to keyword arguments you can after which ``test_receive.webtest._args[0] == 'triangular`` holds true. +.. _`scoped-marking`: + Marking classes or modules ---------------------------------------------------- --- a/testing/path/test_local.py +++ b/testing/path/test_local.py @@ -208,7 +208,7 @@ class TestLocalPath(common.CommonFSTests assert l[2] == p3 class TestExecutionOnWindows: - skipif = "sys.platform != 'win32'" + pytestmark = py.test.mark.skipif("sys.platform != 'win32'") def test_sysfind(self): x = py.path.local.sysfind('cmd') @@ -216,7 +216,7 @@ class TestExecutionOnWindows: assert py.path.local.sysfind('jaksdkasldqwe') is None class TestExecution: - skipif = "sys.platform == 'win32'" + pytestmark = py.test.mark.skipif("sys.platform == 'win32'") def test_sysfind(self): x = py.path.local.sysfind('test') @@ -346,7 +346,7 @@ def test_homedir(): assert homedir.check(dir=1) class TestWINLocalPath: - skipif = "sys.platform != 'win32'" + pytestmark = py.test.mark.skipif("sys.platform != 'win32'") def test_owner_group_not_implemented(self): py.test.raises(NotImplementedError, "path1.stat().owner") @@ -395,7 +395,7 @@ class TestWINLocalPath: old.chdir() class TestPOSIXLocalPath: - skipif = "sys.platform == 'win32'" + pytestmark = py.test.mark.skipif("sys.platform == 'win32'") def test_samefile(self, tmpdir): assert tmpdir.samefile(tmpdir) --- a/_py/test/plugin/pytest_skipping.py +++ b/_py/test/plugin/pytest_skipping.py @@ -13,15 +13,15 @@ reported at the end of test run through skip a test function conditionally ------------------------------------------- -Here is an example for skipping a test function on Python3:: +Here is an example for skipping a test function when +running on Python3:: @py.test.mark.skipif("sys.version_info >= (3,0)") def test_function(): ... -The 'skipif' marker accepts an **arbitrary python expression** -as a condition. When setting up the test function the condition -is evaluated by calling ``eval(expr, namespace)``. The namespace +During test function setup the skipif condition is +evaluated by calling ``eval(expr, namespace)``. The namespace contains the ``sys`` and ``os`` modules as well as the test ``config`` object. The latter allows you to skip based on a test configuration value e.g. like this:: @@ -30,6 +30,10 @@ on a test configuration value e.g. like def test_function(...): ... +Note that `test marking can be declared at whole class- or module level`_. + +.. 
_`test marking can also be declared at whole class- or module level`: keyword.html#scoped-marking + conditionally mark a function as "expected to fail" ------------------------------------------------------- @@ -123,6 +127,7 @@ def pytest_runtest_makereport(__multical rep.keywords['xfail'] = True # expr return rep +# called by terminalreporter progress reporting def pytest_report_teststatus(report): if 'xfail' in report.keywords: if report.skipped: @@ -165,29 +170,22 @@ def show_xfailed(terminalreporter): tr._tw.line(pos) -def getexpression(item, keyword): +def evalexpression(item, keyword): if isinstance(item, py.test.collect.Function): - val = getattr(item.obj, keyword, None) - val = getattr(val, '_0', val) - if val is not None: - return val - cls = item.getparent(py.test.collect.Class) - if cls and hasattr(cls.obj, keyword): - return getattr(cls.obj, keyword) - mod = item.getparent(py.test.collect.Module) - if mod: - return getattr(mod.obj, keyword, None) - -def evalexpression(item, keyword): - expr = getexpression(item, keyword) - result = None - if expr: - if isinstance(expr, str): + markholder = getattr(item.obj, keyword, None) + result = False + if markholder: d = {'os': py.std.os, 'sys': py.std.sys, 'config': item.config} - result = eval(expr, d) - else: - result = expr - return expr, result + expr, result = None, True + for expr in markholder._args: + if isinstance(expr, str): + result = eval(expr, d) + else: + result = expr + if not result: + break + return expr, result + return None, False def folded_skips(skipped): d = {} --- a/testing/process/test_forkedfunc.py +++ b/testing/process/test_forkedfunc.py @@ -1,6 +1,6 @@ import py, sys, os -skipif = "not hasattr(os, 'fork')" +pytestmark = py.test.mark.skipif("not hasattr(os, 'fork')") def test_waitfinish_removes_tempdir(): ff = py.process.ForkedFunc(boxf1) --- a/testing/pytest/plugin/test_pytest_skipping.py +++ b/testing/pytest/plugin/test_pytest_skipping.py @@ -46,8 +46,8 @@ def test_xfail_decorator(testdir): def test_xfail_at_module(testdir): p = testdir.makepyfile(""" - xfail = 'True' - + import py + pytestmark = py.test.mark.xfail('True') def test_intentional_xfail(): assert 0 """) @@ -76,8 +76,9 @@ def test_skipif_decorator(testdir): def test_skipif_class(testdir): p = testdir.makepyfile(""" import py + class TestClass: - skipif = "True" + pytestmark = py.test.mark.skipif("True") def test_that(self): assert 0 def test_though(self): @@ -88,36 +89,12 @@ def test_skipif_class(testdir): "*2 skipped*" ]) -def test_getexpression(testdir): - from _py.test.plugin.pytest_skipping import getexpression - l = testdir.getitems(""" - import py - mod = 5 - class TestClass: - cls = 4 - @py.test.mark.func(3) - def test_func(self): - pass - @py.test.mark.just - def test_other(self): - pass - """) - item, item2 = l - assert getexpression(item, 'xyz') is None - assert getexpression(item, 'func') == 3 - assert getexpression(item, 'cls') == 4 - assert getexpression(item, 'mod') == 5 - - assert getexpression(item2, 'just') - - item2.parent = None - assert not getexpression(item2, 'nada') - def test_evalexpression_cls_config_example(testdir): from _py.test.plugin.pytest_skipping import evalexpression item, = testdir.getitems(""" + import py class TestClass: - skipif = "config._hackxyz" + pytestmark = py.test.mark.skipif("config._hackxyz") def test_func(self): pass """) From commits-noreply at bitbucket.org Fri Oct 23 13:12:11 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Fri, 23 Oct 2009 11:12:11 +0000 (UTC) 
Subject: [py-svn] py-virtualenv commit eb41af2b8634: nosetest plugin now supports fallback to module level setup Message-ID: <20091023111218.076197EF57@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-virtualenv # URL http://bitbucket.org/RonnyPfannschmidt/py-virtualenv/overview/ # User Ronny Pfannschmidt # Date 1256303513 -7200 # Node ID eb41af2b863412a2d9d0e73d42d4f13407ef120e # Parent 57132bbe20c575c34de83f196f1006cceaa594b8 nosetest plugin now supports fallback to module level setup --- a/_py/test/plugin/pytest_nose.py +++ b/_py/test/plugin/pytest_nose.py @@ -66,7 +66,9 @@ def pytest_runtest_setup(item): if isinstance(gen.parent, py.test.collect.Instance): call_optional(gen.parent.obj, 'setup') gen._nosegensetup = True - call_optional(item.obj, 'setup') + if not call_optional(item.obj, 'setup'): + # call module level setup if there is no object level one + call_optional(item.parent.obj, 'setup') def pytest_runtest_teardown(item): if isinstance(item, py.test.collect.Function): @@ -83,3 +85,6 @@ def call_optional(obj, name): method = getattr(obj, name, None) if method: method() + return True + else: + return False --- a/testing/pytest/plugin/test_pytest_nose.py +++ b/testing/pytest/plugin/test_pytest_nose.py @@ -85,3 +85,17 @@ def test_nose_test_generator_fixtures(te ]) + +def test_module_level_setup(testdir): + testdir.makepyfile(""" + items = {} + def setup(): + items[1]=1 + + def test_setup_changed_stuff(): + assert items + """) + result = testdir.runpytest('-p', 'nose') + result.stdout.fnmatch_lines([ + "*1 passed*", + ]) From commits-noreply at bitbucket.org Fri Oct 23 13:28:21 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Fri, 23 Oct 2009 11:28:21 +0000 (UTC) Subject: [py-svn] py-virtualenv commit 61a03e7e7224: better tests for the nose plugin, support module level teardown Message-ID: <20091023112821.D88CD7EF46@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-virtualenv # URL http://bitbucket.org/RonnyPfannschmidt/py-virtualenv/overview/ # User Ronny Pfannschmidt # Date 1256304479 -7200 # Node ID 61a03e7e72248ee42f29cbceea1aad052c62362b # Parent eb41af2b863412a2d9d0e73d42d4f13407ef120e better tests for the nose plugin, support module level teardown --- a/_py/test/plugin/pytest_nose.py +++ b/_py/test/plugin/pytest_nose.py @@ -72,7 +72,8 @@ def pytest_runtest_setup(item): def pytest_runtest_teardown(item): if isinstance(item, py.test.collect.Function): - call_optional(item.obj, 'teardown') + if not call_optional(item.obj, 'teardown'): + call_optional(item.parent.obj, 'teardown') #if hasattr(item.parent, '_nosegensetup'): # #call_optional(item._nosegensetup, 'teardown') # del item.parent._nosegensetup --- a/testing/pytest/plugin/test_pytest_nose.py +++ b/testing/pytest/plugin/test_pytest_nose.py @@ -88,14 +88,30 @@ def test_nose_test_generator_fixtures(te def test_module_level_setup(testdir): testdir.makepyfile(""" + from nose.tools import with_setup items = {} def setup(): items[1]=1 - def test_setup_changed_stuff(): - assert items + def teardown(): + del items[1] + + def setup2(): + items[2] = 2 + + def teardown2(): + del items[2] + + def test_setup_module_setup(): + assert items[1] == 1 + + @with_setup(setup2, teardown2) + def test_local_setup(): + assert items[2] == 2 + assert 1 not in items + """) result = testdir.runpytest('-p', 'nose') result.stdout.fnmatch_lines([ - "*1 passed*", + "*2 passed*", ]) From commits-noreply at bitbucket.org Fri Oct 23 14:17:25 2009 From: commits-noreply at bitbucket.org 
(commits-noreply at bitbucket.org) Date: Fri, 23 Oct 2009 12:17:25 +0000 (UTC) Subject: [py-svn] py-virtualenv commit 988b3400b47e: nose plugin wont call setup functions that arent made for it Message-ID: <20091023121725.B7D657EF1F@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-virtualenv # URL http://bitbucket.org/RonnyPfannschmidt/py-virtualenv/overview/ # User Ronny Pfannschmidt # Date 1256307388 -7200 # Node ID 988b3400b47e55cd1fb5e2b3afaa4462831cf51f # Parent 61a03e7e72248ee42f29cbceea1aad052c62362b nose plugin wont call setup functions that arent made for it --- a/_py/test/plugin/pytest_nose.py +++ b/_py/test/plugin/pytest_nose.py @@ -85,7 +85,9 @@ def pytest_make_collect_report(collector def call_optional(obj, name): method = getattr(obj, name, None) if method: - method() - return True - else: - return False + argspec = inspect.getargspec(method) + if argspec[0] == ['self']: + argspec = argspec[1:] + if not any(argspec): + method() + return True From commits-noreply at bitbucket.org Fri Oct 23 14:17:27 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Fri, 23 Oct 2009 12:17:27 +0000 (UTC) Subject: [py-svn] py-virtualenv commit af41413664fb: support nose style argument-free setup/teardown functions Message-ID: <20091023121727.7504A7EF56@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-virtualenv # URL http://bitbucket.org/RonnyPfannschmidt/py-virtualenv/overview/ # User Ronny Pfannschmidt # Date 1256307426 -7200 # Node ID af41413664fbbfdf816ae4a8a8e383283d5c0977 # Parent 988b3400b47e55cd1fb5e2b3afaa4462831cf51f support nose style argument-free setup/teardown functions --- a/_py/test/pycollect.py +++ b/_py/test/pycollect.py @@ -161,13 +161,24 @@ class Module(py.test.collect.File, PyCol def setup(self): if getattr(self.obj, 'disabled', 0): py.test.skip("%r is disabled" %(self.obj,)) - mod = self.obj - if hasattr(mod, 'setup_module'): - self.obj.setup_module(mod) + if hasattr(self.obj, 'setup_module'): + #XXX: nose compat hack, move to nose plugin + # if it takes a positional arg, its probably a py.test style one + # so we pass the current module object + if inspect.getargspec(self.obj.setup_module)[0]: + self.obj.setup_module(self.obj) + else: + self.obj.setup_module() def teardown(self): if hasattr(self.obj, 'teardown_module'): - self.obj.teardown_module(self.obj) + #XXX: nose compat hack, move to nose plugin + # if it takes a positional arg, its probably a py.test style one + # so we pass the current module object + if inspect.getargspec(self.obj.teardown_module)[0]: + self.obj.teardown_module(self.obj) + else: + self.obj.teardown_module() class Class(PyCollectorMixin, py.test.collect.Collector): --- a/testing/pytest/plugin/test_pytest_nose.py +++ b/testing/pytest/plugin/test_pytest_nose.py @@ -115,3 +115,24 @@ def test_module_level_setup(testdir): result.stdout.fnmatch_lines([ "*2 passed*", ]) + +def test_nose_style_setup_teardown(testdir): + testdir.makepyfile(""" + l = [] + def setup_module(): + l.append(1) + + def teardown_module(): + del l[0] + + def test_hello(): + assert l == [1] + + def test_world(): + assert l == [1] + """) + result = testdir.runpytest('-p', 'nose') + result.stdout.fnmatch_lines([ + "*2 passed*", + ]) + From commits-noreply at bitbucket.org Tue Oct 27 09:43:08 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 08:43:08 +0000 (UTC) Subject: [py-svn] py-trunk commit 0530b0720167: removing some py.execnet references and moving scripts to 
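For context (a sketch, not part of the changeset): with the argspec checks
above, both spellings of module-level fixtures are accepted, so py.test-style
and nose-style modules can coexist::

    def setup_module(module):      # py.test style: receives the module object
        module.items = []

    # ...or, nose style, taking no argument at all:
    # def setup_module():
    #     ...

The nose plugin's call_optional() similarly inspects the callable and only
invokes argument-free setup/teardown functions (a lone 'self' parameter is
ignored).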
execnet repo Message-ID: <20091027084308.6E7247EF34@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256631563 -3600 # Node ID 0530b07201676100ca553767972ed537aa8883d3 # Parent ded48829fe86317694801bb622a7c2679eb24efd removing some py.execnet references and moving scripts to execnet repo --- a/contrib/sysinfo.py +++ /dev/null @@ -1,139 +0,0 @@ -""" -sysinfo.py [host1] [host2] [options] - -obtain system info from remote machine. -""" - -import py -import sys - - -parser = py.std.optparse.OptionParser(usage=__doc__) -parser.add_option("-f", "--sshconfig", action="store", dest="ssh_config", default=None, - help="use given ssh config file, and add info all contained hosts for getting info") -parser.add_option("-i", "--ignore", action="store", dest="ignores", default=None, - help="ignore hosts (useful if the list of hostnames come from a file list)") - -def parsehosts(path): - path = py.path.local(path) - l = [] - rex = py.std.re.compile(r'Host\s*(\S+)') - for line in path.readlines(): - m = rex.match(line) - if m is not None: - sshname, = m.groups() - l.append(sshname) - return l - -class RemoteInfo: - def __init__(self, gateway): - self.gw = gateway - self._cache = {} - - def exreceive(self, execstring): - if execstring not in self._cache: - channel = self.gw.remote_exec(execstring) - self._cache[execstring] = channel.receive() - return self._cache[execstring] - - def getmodattr(self, modpath): - module = modpath.split(".")[0] - return self.exreceive(""" - import %s - channel.send(%s) - """ %(module, modpath)) - - def islinux(self): - return self.getmodattr('sys.platform').find("linux") != -1 - - def getfqdn(self): - return self.exreceive(""" - import socket - channel.send(socket.getfqdn()) - """) - - def getmemswap(self): - if self.islinux(): - return self.exreceive(""" - import commands, re - out = commands.getoutput("free") - mem = re.search(r"Mem:\s+(\S*)", out).group(1) - swap = re.search(r"Swap:\s+(\S*)", out).group(1) - channel.send((mem, swap)) - """) - - def getcpuinfo(self): - if self.islinux(): - return self.exreceive(""" - # a hyperthreaded cpu core only counts as 1, although it - # is present as 2 in /proc/cpuinfo. Counting it as 2 is - # misleading because it is *by far* not as efficient as - # two independent cores. 
- cpus = {} - cpuinfo = {} - f = open("/proc/cpuinfo") - lines = f.readlines() - f.close() - for line in lines + ['']: - if line.strip(): - key, value = line.split(":", 1) - cpuinfo[key.strip()] = value.strip() - else: - corekey = (cpuinfo.get("physical id"), - cpuinfo.get("core id")) - cpus[corekey] = 1 - numcpus = len(cpus) - model = cpuinfo.get("model name") - channel.send((numcpus, model)) - """) - -def debug(*args): - print >>sys.stderr, " ".join(map(str, args)) -def error(*args): - debug("ERROR", args[0] + ":", *args[1:]) - -def getinfo(sshname, ssh_config=None, loginfo=sys.stdout): - debug("connecting to", sshname) - try: - gw = execnet.SshGateway(sshname, ssh_config=ssh_config) - except IOError: - error("could not get sshagteway", sshname) - else: - ri = RemoteInfo(gw) - #print "%s info:" % sshname - prefix = sshname.upper() + " " - print >>loginfo, prefix, "fqdn:", ri.getfqdn() - for attr in ( - "sys.platform", - "sys.version_info", - ): - loginfo.write("%s %s: " %(prefix, attr,)) - loginfo.flush() - value = ri.getmodattr(attr) - loginfo.write(str(value)) - loginfo.write("\n") - loginfo.flush() - memswap = ri.getmemswap() - if memswap: - mem,swap = memswap - print >>loginfo, prefix, "Memory:", mem, "Swap:", swap - cpuinfo = ri.getcpuinfo() - if cpuinfo: - numcpu, model = cpuinfo - print >>loginfo, prefix, "number of cpus:", numcpu - print >>loginfo, prefix, "cpu model", model - return ri - -if __name__ == '__main__': - options, args = parser.parse_args() - hosts = list(args) - ssh_config = options.ssh_config - if ssh_config: - hosts.extend(parsehosts(ssh_config)) - ignores = options.ignores or () - if ignores: - ignores = ignores.split(",") - for host in hosts: - if host not in ignores: - getinfo(host, ssh_config=ssh_config) - --- a/contrib/svn-sync-repo.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python - -""" - -small utility for hot-syncing a svn repository through ssh. -uses execnet. - -""" - -import py -import sys, os - -def usage(): - arg0 = sys.argv[0] - print """%s [user@]remote-host:/repo/location localrepo [identity keyfile]""" % (arg0,) - - -def main(args): - remote = args[0] - localrepo = py.path.local(args[1]) - if not localrepo.check(dir=1): - raise SystemExit("localrepo %s does not exist" %(localrepo,)) - if len(args) == 3: - keyfile = py.path.local(args[2]) - else: - keyfile = None - remote_host, path = remote.split(':', 1) - print "ssh-connecting to", remote_host - gw = getgateway(remote_host, keyfile) - - local_rev = get_svn_youngest(localrepo) - - # local protocol - # 1. client sends rev/repo -> server - # 2. server checks for newer revisions and sends dumps - # 3. client receives dumps, updates local repo - # 4. 
client goes back to step 1 - c = gw.remote_exec(""" - import py - import os - remote_rev, repopath = channel.receive() - while 1: - rev = py.process.cmdexec('svnlook youngest "%s"' % repopath) - rev = int(rev) - if rev > remote_rev: - revrange = (remote_rev+1, rev) - dumpchannel = channel.gateway.newchannel() - channel.send(revrange) - channel.send(dumpchannel) - - f = os.popen( - "svnadmin dump -q --incremental -r %s:%s %s" - % (revrange[0], revrange[1], repopath), 'r') - try: - maxcount = dumpchannel.receive() - count = maxcount - while 1: - s = f.read(8192) - if not s: - raise EOFError - dumpchannel.send(s) - count = count - 1 - if count <= 0: - ack = dumpchannel.receive() - count = maxcount - - except EOFError: - dumpchannel.close() - remote_rev = rev - else: - # using svn-hook instead would be nice here - py.std.time.sleep(30) - """) - - c.send((local_rev, path)) - print "checking revisions from %d in %s" %(local_rev, remote) - while 1: - revstart, revend = c.receive() - dumpchannel = c.receive() - print "receiving revisions", revstart, "-", revend, "replaying..." - svn_load(localrepo, dumpchannel) - print "current revision", revend - -def svn_load(repo, dumpchannel, maxcount=100): - # every maxcount we will send an ACK to the other - # side in order to synchronise and avoid our side - # growing buffers (py.execnet does not control - # RAM usage or receive queue sizes) - dumpchannel.send(maxcount) - f = os.popen("svnadmin load -q %s" %(repo, ), "w") - count = maxcount - for x in dumpchannel: - sys.stdout.write(".") - sys.stdout.flush() - f.write(x) - count = count - 1 - if count <= 0: - dumpchannel.send(maxcount) - count = maxcount - print >>sys.stdout - f.close() - -def get_svn_youngest(repo): - rev = py.process.cmdexec('svnlook youngest "%s"' % repo) - return int(rev) - -def getgateway(host, keyfile=None): - return execnet.SshGateway(host, identity=keyfile) - -if __name__ == '__main__': - if len(sys.argv) < 3: - usage() - raise SystemExit(1) - - main(sys.argv[1:]) - --- a/README.txt +++ b/README.txt @@ -2,7 +2,6 @@ The py lib is a Python development suppo the following tools and modules: * py.test: tool for distributed automated testing -* py.execnet: ad-hoc distributed execution * py.code: dynamic code generation and introspection * py.path: uniform local and svn path objects --- a/doc/confrest.py +++ b/doc/confrest.py @@ -73,7 +73,6 @@ pageTracker._trackPageview(); html.div( html.h3("supporting APIs:"), self.a_docref("pylib index", "index.html"), - self.a_docref("py.execnet", "execnet.html"), self.a_docref("py.path", "path.html"), self.a_docref("py.code", "code.html"), ) From commits-noreply at bitbucket.org Tue Oct 27 09:43:10 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 08:43:10 +0000 (UTC) Subject: [py-svn] py-trunk commit ded48829fe86: refine naming, API and docs for py.test.mark mechanism - now contained in pytest_mark plugin Message-ID: <20091027084310.DBA467EF31@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256237841 -7200 # Node ID ded48829fe86317694801bb622a7c2679eb24efd # Parent 57132bbe20c575c34de83f196f1006cceaa594b8 refine naming, API and docs for py.test.mark mechanism - now contained in pytest_mark plugin --- /dev/null +++ b/_py/test/plugin/pytest_mark.py @@ -0,0 +1,137 @@ +""" +generic mechanism for marking python functions. 
+ +By using the ``py.test.mark`` helper you can instantiate +decorators that will set named meta data on test functions. + +Marking a single function +---------------------------------------------------- + +You can "mark" a test function with meta data like this:: + + @py.test.mark.webtest + def test_send_http(): + ... + +This will set a "Marker" instance as a function attribute named "webtest". +You can also specify parametrized meta data like this:: + + @py.test.mark.webtest(firefox=30) + def test_receive(): + ... + +The named marker can be accessed like this later:: + + test_receive.webtest.kwargs['firefox'] == 30 + +In addition to set key-value pairs you can also use positional arguments:: + + @py.test.mark.webtest("triangular") + def test_receive(): + ... + +and later access it with ``test_receive.webtest.args[0] == 'triangular``. + +.. _`scoped-marking`: + +Marking classes or modules +---------------------------------------------------- + +To mark all methods of a class set a ``pytestmark`` attribute like this:: + + import py + + class TestClass: + pytestmark = py.test.mark.webtest + +You can re-use the same markers that you would use for decorating +a function - in fact this marker decorator will be applied +to all test methods of the class. + +You can also set a module level marker:: + + import py + pytestmark = py.test.mark.webtest + +in which case then the marker decorator will be applied to all functions and +methods defined in the module. + +The order in which marker functions are called is this:: + + per-function (upon import of module already) + per-class + per-module + +Later called markers may overwrite previous key-value settings. +Positional arguments are all appended to the same 'args' list +of the Marker object. +""" +import py + +def pytest_namespace(): + return {'mark': Mark()} + + +class Mark(object): + def __getattr__(self, name): + if name[0] == "_": + raise AttributeError(name) + return MarkerDecorator(name) + +class MarkerDecorator: + """ decorator for setting function attributes. 
""" + def __init__(self, name): + self.markname = name + self.kwargs = {} + self.args = [] + + def __repr__(self): + d = self.__dict__.copy() + name = d.pop('markname') + return "" %(name, d) + + def __call__(self, *args, **kwargs): + if args: + if len(args) == 1 and hasattr(args[0], '__call__'): + func = args[0] + holder = getattr(func, self.markname, None) + if holder is None: + holder = Marker(self.markname, self.args, self.kwargs) + setattr(func, self.markname, holder) + else: + holder.kwargs.update(self.kwargs) + holder.args.extend(self.args) + return func + else: + self.args.extend(args) + self.kwargs.update(kwargs) + return self + +class Marker: + def __init__(self, name, args, kwargs): + self._name = name + self.args = args + self.kwargs = kwargs + + def __getattr__(self, name): + if name[0] != '_' and name in self.kwargs: + py.log._apiwarn("1.1", "use .kwargs attribute to access key-values") + return self.kwargs[name] + raise AttributeError(name) + + def __repr__(self): + return "" % ( + self._name, self.args, self.kwargs) + + +def pytest_pycollect_makeitem(__multicall__, collector, name, obj): + item = __multicall__.execute() + if isinstance(item, py.test.collect.Function): + cls = collector.getparent(py.test.collect.Class) + mod = collector.getparent(py.test.collect.Module) + func = getattr(item.obj, 'im_func', item.obj) + for parent in [x for x in (mod, cls) if x]: + marker = getattr(parent.obj, 'pytestmark', None) + if isinstance(marker, MarkerDecorator): + marker(func) + return item --- a/doc/test/plugin/links.txt +++ b/doc/test/plugin/links.txt @@ -3,7 +3,6 @@ .. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_recwarn.py .. _`unittest`: unittest.html .. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_monkeypatch.py -.. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_keyword.py .. _`pastebin`: pastebin.html .. _`skipping`: skipping.html .. _`plugins`: index.html @@ -13,6 +12,7 @@ .. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_restdoc.py .. _`restdoc`: restdoc.html .. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_pastebin.py +.. _`mark`: mark.html .. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_figleaf.py .. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_hooklog.py .. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_skipping.py @@ -20,6 +20,7 @@ .. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_helpconfig.py .. _`oejskit`: oejskit.html .. _`doctest`: doctest.html +.. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_mark.py .. _`get in contact`: ../../contact.html .. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_capture.py .. _`figleaf`: figleaf.html @@ -30,7 +31,6 @@ .. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_pdb.py .. _`monkeypatch`: monkeypatch.html .. _`resultlog`: resultlog.html -.. _`keyword`: keyword.html .. _`django`: django.html .. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_unittest.py .. 
_`nose`: nose.html --- a/conftest.py +++ b/conftest.py @@ -52,7 +52,7 @@ def pytest_generate_tests(metafunc): multi = getattr(metafunc.function, 'multi', None) if multi is None: return - assert len(multi._kwargs) == 1 - for name, l in multi._kwargs.items(): + assert len(multi.kwargs) == 1 + for name, l in multi.kwargs.items(): for val in l: metafunc.addcall(funcargs={name: val}) --- a/bin-for-dist/makepluginlist.py +++ b/bin-for-dist/makepluginlist.py @@ -13,7 +13,7 @@ plugins = [ ('plugins for generic reporting and failure logging', 'pastebin resultlog terminal',), ('misc plugins / core functionality', - 'helpconfig pdb keyword hooklog') + 'helpconfig pdb mark hooklog') #('internal plugins / core functionality', # #'pdb keyword hooklog runner execnetcleanup # pytester', # 'pdb keyword hooklog runner execnetcleanup' # pytester', --- a/doc/test/features.txt +++ b/doc/test/features.txt @@ -200,7 +200,7 @@ kewords like this:: and then use those keywords to select tests. See the `pytest_keyword`_ plugin for more information. -.. _`pytest_keyword`: plugin/keyword.html +.. _`pytest_keyword`: plugin/mark.html easy to extend ========================================= --- a/doc/test/plugin/index.txt +++ b/doc/test/plugin/index.txt @@ -2,7 +2,7 @@ plugins for Python test functions ================================= -skipping_ advanced conditional skipping for python test functions, classes or modules. +skipping_ advanced skipping for python test functions, classes or modules. figleaf_ write and report coverage data with 'figleaf'. @@ -56,7 +56,7 @@ helpconfig_ provide version info, confte pdb_ interactive debugging with the Python Debugger. -keyword_ mark test functions with keywords that may hold values. +mark_ generic mechanism for marking python functions. hooklog_ log invocations of extension hooks to a file. --- a/doc/test/plugin/skipping.txt +++ b/doc/test/plugin/skipping.txt @@ -2,32 +2,39 @@ pytest_skipping plugin ====================== -advanced conditional skipping for python test functions, classes or modules. +advanced skipping for python test functions, classes or modules. .. contents:: :local: -You can mark functions, classes or modules for for conditional -skipping (skipif) or as expected-to-fail (xfail). The difference -between the two is that 'xfail' will still execute test functions -but it will invert the outcome: a passing test becomes a failure and -a failing test is a semi-passing one. All skip conditions are -reported at the end of test run through the terminal reporter. +With this plugin you can mark test functions for conditional skipping +or as "xfail", expected-to-fail. Skipping a test will avoid running it +while xfail-marked tests will run and result in an inverted outcome: +a pass becomes a failure and a fail becomes a semi-passing one. + +The need for skipping a test is usually connected to a condition. +If a test fails under all conditions then it's probably better +to mark your test as 'xfail'. + +By passing ``--report=xfailed,skipped`` to the terminal reporter +you will see summary information on skips and xfail-run tests +at the end of a test run. .. _skipif: -skip a test function conditionally +mark a test function to be skipped ------------------------------------------- -Here is an example for skipping a test function on Python3:: +Here is an example for skipping a test function when +running on Python3:: @py.test.mark.skipif("sys.version_info >= (3,0)") def test_function(): ... -The 'skipif' marker accepts an **arbitrary python expression** -as a condition. 
When setting up the test function the condition -is evaluated by calling ``eval(expr, namespace)``. The namespace + +During test function setup the skipif condition is +evaluated by calling ``eval(expr, namespace)``. The namespace contains the ``sys`` and ``os`` modules as well as the test ``config`` object. The latter allows you to skip based on a test configuration value e.g. like this:: @@ -37,71 +44,74 @@ on a test configuration value e.g. like ... -conditionally mark a function as "expected to fail" +mark many test functions at once +-------------------------------------- + +As with all metadata function marking you can do it at +`whole class- or module level`_. Here is an example +for skipping all methods of a test class based on platform:: + + class TestPosixCalls: + pytestmark = py.test.mark.skipif("sys.platform == 'win32'") + + def test_function(self): + # will not be setup or run under 'win32' platform + # + + +.. _`whole class- or module level`: mark.html#scoped-marking + + +mark a test function as expected to fail ------------------------------------------------------- -You can use the ``xfail`` keyword to mark your test functions as -'expected to fail':: +You can use the ``xfail`` marker to indicate that you +expect the test to fail:: @py.test.mark.xfail - def test_hello(): - ... - -This test will be executed but no traceback will be reported -when it fails. Instead terminal reporting will list it in the -"expected to fail" or "unexpectedly passing" sections. -As with skipif_ you may selectively expect a failure -depending on platform:: - - @py.test.mark.xfail("sys.version_info >= (3,0)") def test_function(): ... -skip/xfail a whole test class or module -------------------------------------------- +This test will be run but no traceback will be reported +when it fails. Instead terminal reporting will list it in the +"expected to fail" or "unexpectedly passing" sections. -Instead of marking single functions you can skip -a whole class of tests when running on a specific -platform:: +Same as with skipif_ you can also selectively expect a failure +depending on platform:: - class TestSomething: - skipif = "sys.platform == 'win32'" + @py.test.mark.xfail(if"sys.version_info >= (3,0)") -Or you can mark all test functions as expected -to fail for a specific test configuration:: + def test_function(): + ... - xfail = "config.getvalue('db') == 'mysql'" +skipping on a missing import dependency +-------------------------------------------------- -skip if a dependency cannot be imported ---------------------------------------------- - -You can use a helper to skip on a failing import:: +You can use the following import helper at module level +or within a test or setup function. docutils = py.test.importorskip("docutils") -You can use this helper at module level or within -a test or setup function. - -You can also skip if a library does not come with a high enough version:: +If ``docutils`` cannot be imported here, this will lead to a +skip outcome of the test. You can also skip dependeing if +if a library does not come with a high enough version:: docutils = py.test.importorskip("docutils", minversion="0.3") The version will be read from the specified module's ``__version__`` attribute. 
-dynamically skip from within a test or setup -------------------------------------------------- +imperative skip from within a test or setup function +------------------------------------------------------ -If you want to skip the execution of a test you can call -``py.test.skip()`` within a test, a setup or from a -`funcarg factory`_ function. Example:: +If for some reason you cannot declare skip-conditions +you can also imperatively produce a Skip-outcome from +within test or setup code. Example:: def test_function(): if not valid_config(): py.test.skip("unsuppored configuration") -.. _`funcarg factory`: ../funcargs.html#factory - Start improving this plugin in 30 seconds ========================================= --- /dev/null +++ b/doc/test/plugin/mark.txt @@ -0,0 +1,85 @@ + +pytest_mark plugin +================== + +generic mechanism for marking python functions. + +.. contents:: + :local: + +By using the ``py.test.mark`` helper you can instantiate +decorators that will set named meta data on test functions. + +Marking a single function +---------------------------------------------------- + +You can "mark" a test function with meta data like this:: + + @py.test.mark.webtest + def test_send_http(): + ... + +This will set a "Marker" instance as a function attribute named "webtest". +You can also specify parametrized meta data like this:: + + @py.test.mark.webtest(firefox=30) + def test_receive(): + ... + +The named marker can be accessed like this later:: + + test_receive.webtest.kwargs['firefox'] == 30 + +In addition to set key-value pairs you can also use positional arguments:: + + @py.test.mark.webtest("triangular") + def test_receive(): + ... + +and later access it with ``test_receive.webtest.args[0] == 'triangular``. + +.. _`scoped-marking`: + +Marking classes or modules +---------------------------------------------------- + +To mark all methods of a class set a ``pytestmark`` attribute like this:: + + import py + + class TestClass: + pytestmark = py.test.mark.webtest + +You can re-use the same markers that you would use for decorating +a function - in fact this marker decorator will be applied +to all test methods of the class. + +You can also set a module level marker:: + + import py + pytestmark = py.test.mark.webtest + +in which case then the marker decorator will be applied to all functions and +methods defined in the module. + +The order in which marker functions are called is this:: + + per-function (upon import of module already) + per-class + per-module + +Later called markers may overwrite previous key-value settings. +Positional arguments are all appended to the same 'args' list +of the Marker object. + +Start improving this plugin in 30 seconds +========================================= + + +1. Download `pytest_mark.py`_ plugin source code +2. put it somewhere as ``pytest_mark.py`` into your import path +3. a subsequent ``py.test`` run will use your local version + +Checkout customize_, other plugins_ or `get in contact`_. + +.. include:: links.txt --- a/_py/test/plugin/pytest_keyword.py +++ /dev/null @@ -1,122 +0,0 @@ -""" -mark test functions with keywords that may hold values. - -Marking functions by a decorator ----------------------------------------------------- - -By default, all filename parts and class/function names of a test -function are put into the set of keywords for a given test. You can -specify additional kewords like this:: - - @py.test.mark.webtest - def test_send_http(): - ... 
- -This will set an attribute 'webtest' to True on the given test function. -You can read the value 'webtest' from the functions __dict__ later. - -You can also set values for an attribute which are put on an empty -dummy object:: - - @py.test.mark.webtest(firefox=30) - def test_receive(): - ... - -after which ``test_receive.webtest.firefox == 30`` holds true. - -In addition to keyword arguments you can also use positional arguments:: - - @py.test.mark.webtest("triangular") - def test_receive(): - ... - -after which ``test_receive.webtest._args[0] == 'triangular`` holds true. - - -.. _`scoped-marking`: - -Marking classes or modules ----------------------------------------------------- - -To mark all methods of a class you can set a class-level attribute:: - - class TestClass: - pytestmark = py.test.mark.webtest - -the marker function will be applied to all test methods. - -If you set a marker it inside a test module like this:: - - pytestmark = py.test.mark.webtest - -the marker will be applied to all functions and methods of -that module. The module marker is applied last. - -Outer ``pytestmark`` keywords will overwrite inner keyword -values. Positional arguments are all appeneded to the -same '_args' list. -""" -import py - -def pytest_namespace(): - return {'mark': Mark()} - - -class Mark(object): - def __getattr__(self, name): - if name[0] == "_": - raise AttributeError(name) - return MarkerDecorator(name) - -class MarkerDecorator: - """ decorator for setting function attributes. """ - def __init__(self, name): - self.markname = name - self.kwargs = {} - self.args = [] - - def __repr__(self): - d = self.__dict__.copy() - name = d.pop('markname') - return "" %(name, d) - - def __call__(self, *args, **kwargs): - if args: - if len(args) == 1 and hasattr(args[0], '__call__'): - func = args[0] - holder = getattr(func, self.markname, None) - if holder is None: - holder = MarkHolder(self.markname, self.args, self.kwargs) - setattr(func, self.markname, holder) - else: - holder.__dict__.update(self.kwargs) - holder._args.extend(self.args) - return func - else: - self.args.extend(args) - self.kwargs.update(kwargs) - return self - -class MarkHolder: - def __init__(self, name, args, kwargs): - self._name = name - self._args = args - self._kwargs = kwargs - self.__dict__.update(kwargs) - - def __repr__(self): - return "" % ( - self._name, self._args, self._kwargs) - - -def pytest_pycollect_makeitem(__multicall__, collector, name, obj): - item = __multicall__.execute() - if isinstance(item, py.test.collect.Function): - cls = collector.getparent(py.test.collect.Class) - mod = collector.getparent(py.test.collect.Module) - func = getattr(item.obj, 'im_func', item.obj) - for parent in [x for x in (mod, cls) if x]: - marker = getattr(parent.obj, 'pytestmark', None) - if isinstance(marker, MarkerDecorator): - marker(func) - return item --- /dev/null +++ b/testing/pytest/plugin/test_pytest_mark.py @@ -0,0 +1,110 @@ +import py +from _py.test.plugin.pytest_mark import Mark + +class TestMark: + def test_pytest_mark_notcallable(self): + mark = Mark() + py.test.raises(TypeError, "mark()") + + def test_pytest_mark_bare(self): + mark = Mark() + def f(): pass + mark.hello(f) + assert f.hello + + def test_pytest_mark_keywords(self): + mark = Mark() + def f(): pass + mark.world(x=3, y=4)(f) + assert f.world + assert f.world.x == 3 + assert f.world.y == 4 + + def test_apply_multiple_and_merge(self): + mark = Mark() + def f(): pass + marker = mark.world + mark.world(x=3)(f) + assert f.world.x == 3 + mark.world(y=4)(f) 
+ assert f.world.x == 3 + assert f.world.y == 4 + mark.world(y=1)(f) + assert f.world.y == 1 + assert len(f.world.args) == 0 + + def test_pytest_mark_positional(self): + mark = Mark() + def f(): pass + mark.world("hello")(f) + assert f.world.args[0] == "hello" + mark.world("world")(f) + + def test_oldstyle_marker_access(self, recwarn): + mark = Mark() + def f(): pass + mark.world(x=1)(f) + assert f.world.x == 1 + assert recwarn.pop() + +class TestFunctional: + def test_mark_per_function(self, testdir): + p = testdir.makepyfile(""" + import py + @py.test.mark.hello + def test_hello(): + assert hasattr(test_hello, 'hello') + """) + result = testdir.runpytest(p) + assert result.stdout.fnmatch_lines(["*passed*"]) + + def test_mark_per_module(self, testdir): + item = testdir.getitem(""" + import py + pytestmark = py.test.mark.hello + def test_func(): + pass + """) + keywords = item.readkeywords() + assert 'hello' in keywords + + def test_mark_per_class(self, testdir): + modcol = testdir.getmodulecol(""" + import py + class TestClass: + pytestmark = py.test.mark.hello + def test_func(self): + assert TestClass.test_func.hello + """) + clscol = modcol.collect()[0] + item = clscol.collect()[0].collect()[0] + keywords = item.readkeywords() + assert 'hello' in keywords + + def test_merging_markers(self, testdir): + p = testdir.makepyfile(""" + import py + pytestmark = py.test.mark.hello("pos1", x=1, y=2) + class TestClass: + # classlevel overrides module level + pytestmark = py.test.mark.hello(x=3) + @py.test.mark.hello("pos0", z=4) + def test_func(self): + pass + """) + items, rec = testdir.inline_genitems(p) + item, = items + keywords = item.readkeywords() + marker = keywords['hello'] + assert marker.args == ["pos0", "pos1"] + assert marker.kwargs == {'x': 3, 'y': 2, 'z': 4} + + def test_mark_other(self, testdir): + item = testdir.getitem(""" + import py + class pytestmark: + pass + def test_func(): + pass + """) + keywords = item.readkeywords() --- a/doc/test/plugin/keyword.txt +++ /dev/null @@ -1,51 +0,0 @@ - -pytest_keyword plugin -===================== - -mark test functions with keywords that may hold values. - -.. contents:: - :local: - -Marking functions and setting rich attributes ----------------------------------------------------- - -By default, all filename parts and class/function names of a test -function are put into the set of keywords for a given test. You can -specify additional kewords like this:: - - @py.test.mark.webtest - def test_send_http(): - ... - -This will set an attribute 'webtest' to True on the given test function. -You can read the value 'webtest' from the functions __dict__ later. - -You can also set values for an attribute which are put on an empty -dummy object:: - - @py.test.mark.webtest(firefox=30) - def test_receive(): - ... - -after which ``test_receive.webtest.firefox == 30`` holds true. - -In addition to keyword arguments you can also use positional arguments:: - - @py.test.mark.webtest("triangular") - def test_receive(): - ... - -after which ``test_receive.webtest._1 == 'triangular`` hold true. - -Start improving this plugin in 30 seconds -========================================= - - -1. Download `pytest_keyword.py`_ plugin source code -2. put it somewhere as ``pytest_keyword.py`` into your import path -3. a subsequent ``py.test`` run will use your local version - -Checkout customize_, other plugins_ or `get in contact`_. - -.. 
include:: links.txt --- a/doc/test/plugin/terminal.txt +++ b/doc/test/plugin/terminal.txt @@ -13,16 +13,24 @@ command line options -------------------- +``-v, --verbose`` + increase verbosity. +``-l, --showlocals`` + show locals in tracebacks (disabled by default). +``--report=opts`` + comma separated reporting options +``--tb=style`` + traceback verboseness (long/short/no). +``--fulltrace`` + don't cut any tracebacks (default is to cut). ``--collectonly`` only collect tests, don't execute them. ``--traceconfig`` trace considerations of conftest.py files. ``--nomagic`` don't reinterpret asserts, no traceback cutting. -``--fulltrace`` - don't cut any tracebacks (default is to cut). ``--debug`` - generate and show debugging information. + generate and show internal debugging information. Start improving this plugin in 30 seconds ========================================= --- a/doc/test/plugin/capture.txt +++ b/doc/test/plugin/capture.txt @@ -113,10 +113,10 @@ command line options -------------------- +``--capture=method`` + set capturing method during tests: fd (default)|sys|no. ``-s`` shortcut for --capture=no. -``--capture=method`` - set capturing method during tests: fd (default)|sys|no. Start improving this plugin in 30 seconds ========================================= --- a/_py/test/plugin/pytest_skipping.py +++ b/_py/test/plugin/pytest_skipping.py @@ -1,16 +1,22 @@ """ -advanced conditional skipping for python test functions, classes or modules. +advanced skipping for python test functions, classes or modules. -You can mark functions, classes or modules for for conditional -skipping (skipif) or as expected-to-fail (xfail). The difference -between the two is that 'xfail' will still execute test functions -but it will invert the outcome: a passing test becomes a failure and -a failing test is a semi-passing one. All skip conditions are -reported at the end of test run through the terminal reporter. +With this plugin you can mark test functions for conditional skipping +or as "xfail", expected-to-fail. Skipping a test will avoid running it +while xfail-marked tests will run and result in an inverted outcome: +a pass becomes a failure and a fail becomes a semi-passing one. + +The need for skipping a test is usually connected to a condition. +If a test fails under all conditions then it's probably better +to mark your test as 'xfail'. + +By passing ``--report=xfailed,skipped`` to the terminal reporter +you will see summary information on skips and xfail-run tests +at the end of a test run. .. _skipif: -skip a test function conditionally +mark a test function to be skipped ------------------------------------------- Here is an example for skipping a test function when @@ -20,6 +26,7 @@ running on Python3:: def test_function(): ... + During test function setup the skipif condition is evaluated by calling ``eval(expr, namespace)``. The namespace contains the ``sys`` and ``os`` modules as well as the @@ -30,76 +37,75 @@ on a test configuration value e.g. like def test_function(...): ... -Note that `test marking can be declared at whole class- or module level`_. -.. _`test marking can also be declared at whole class- or module level`: keyword.html#scoped-marking +mark many test functions at once +-------------------------------------- +As with all metadata function marking you can do it at +`whole class- or module level`_. 
Here is an example +for skipping all methods of a test class based on platform:: -conditionally mark a function as "expected to fail" + class TestPosixCalls: + pytestmark = py.test.mark.skipif("sys.platform == 'win32'") + + def test_function(self): + # will not be setup or run under 'win32' platform + # + + +.. _`whole class- or module level`: mark.html#scoped-marking + + +mark a test function as expected to fail ------------------------------------------------------- -You can use the ``xfail`` keyword to mark your test functions as -'expected to fail':: +You can use the ``xfail`` marker to indicate that you +expect the test to fail:: @py.test.mark.xfail - def test_hello(): - ... - -This test will be executed but no traceback will be reported -when it fails. Instead terminal reporting will list it in the -"expected to fail" or "unexpectedly passing" sections. -As with skipif_ you may selectively expect a failure -depending on platform:: - - @py.test.mark.xfail("sys.version_info >= (3,0)") def test_function(): ... -skip/xfail a whole test class or module -------------------------------------------- +This test will be run but no traceback will be reported +when it fails. Instead terminal reporting will list it in the +"expected to fail" or "unexpectedly passing" sections. -Instead of marking single functions you can skip -a whole class of tests when running on a specific -platform:: +Same as with skipif_ you can also selectively expect a failure +depending on platform:: - class TestSomething: - skipif = "sys.platform == 'win32'" + @py.test.mark.xfail(if"sys.version_info >= (3,0)") -Or you can mark all test functions as expected -to fail for a specific test configuration:: + def test_function(): + ... - xfail = "config.getvalue('db') == 'mysql'" +skipping on a missing import dependency +-------------------------------------------------- -skip if a dependency cannot be imported ---------------------------------------------- - -You can use a helper to skip on a failing import:: +You can use the following import helper at module level +or within a test or setup function. docutils = py.test.importorskip("docutils") -You can use this helper at module level or within -a test or setup function. - -You can also skip if a library does not come with a high enough version:: +If ``docutils`` cannot be imported here, this will lead to a +skip outcome of the test. You can also skip dependeing if +if a library does not come with a high enough version:: docutils = py.test.importorskip("docutils", minversion="0.3") The version will be read from the specified module's ``__version__`` attribute. -dynamically skip from within a test or setup -------------------------------------------------- +imperative skip from within a test or setup function +------------------------------------------------------ -If you want to skip the execution of a test you can call -``py.test.skip()`` within a test, a setup or from a -`funcarg factory`_ function. Example:: +If for some reason you cannot declare skip-conditions +you can also imperatively produce a Skip-outcome from +within test or setup code. Example:: def test_function(): if not valid_config(): py.test.skip("unsuppored configuration") -.. 
_`funcarg factory`: ../funcargs.html#factory - """ # XXX py.test.skip, .importorskip and the Skipped class # should also be defined in this plugin, requires thought/changes @@ -177,7 +183,7 @@ def evalexpression(item, keyword): if markholder: d = {'os': py.std.os, 'sys': py.std.sys, 'config': item.config} expr, result = None, True - for expr in markholder._args: + for expr in markholder.args: if isinstance(expr, str): result = eval(expr, d) else: --- a/doc/changelog.txt +++ b/doc/changelog.txt @@ -1,6 +1,8 @@ Changes between 1.0.2 and '1.1.0b1' ===================================== +* introduce generalized py.test.mark function marking + * reshuffle / refine command line grouping * deprecate parser.addgroup in favour of getgroup which creates option group --- a/_py/test/defaultconftest.py +++ b/_py/test/defaultconftest.py @@ -10,5 +10,5 @@ Generator = py.test.collect.Generator Function = py.test.collect.Function Instance = py.test.collect.Instance -pytest_plugins = "default runner capture terminal keyword skipping tmpdir monkeypatch recwarn pdb pastebin unittest helpconfig nose assertion".split() +pytest_plugins = "default runner capture terminal mark skipping tmpdir monkeypatch recwarn pdb pastebin unittest helpconfig nose assertion".split() --- a/testing/pytest/plugin/test_pytest_keyword.py +++ /dev/null @@ -1,105 +0,0 @@ -import py -from _py.test.plugin.pytest_keyword import Mark - -class TestMark: - def test_pytest_mark_notcallable(self): - mark = Mark() - py.test.raises(TypeError, "mark()") - - def test_pytest_mark_bare(self): - mark = Mark() - def f(): pass - mark.hello(f) - assert f.hello - - def test_pytest_mark_keywords(self): - mark = Mark() - def f(): pass - mark.world(x=3, y=4)(f) - assert f.world - assert f.world.x == 3 - assert f.world.y == 4 - - def test_apply_multiple_and_merge(self): - mark = Mark() - def f(): pass - marker = mark.world - mark.world(x=3)(f) - assert f.world.x == 3 - mark.world(y=4)(f) - assert f.world.x == 3 - assert f.world.y == 4 - mark.world(y=1)(f) - assert f.world.y == 1 - assert len(f.world._args) == 0 - - def test_pytest_mark_positional(self): - mark = Mark() - def f(): pass - mark.world("hello")(f) - assert f.world._args[0] == "hello" - mark.world("world")(f) - -class TestFunctional: - def test_mark_per_function(self, testdir): - p = testdir.makepyfile(""" - import py - @py.test.mark.hello - def test_hello(): - assert hasattr(test_hello, 'hello') - """) - result = testdir.runpytest(p) - assert result.stdout.fnmatch_lines(["*passed*"]) - - def test_mark_per_module(self, testdir): - item = testdir.getitem(""" - import py - pytestmark = py.test.mark.hello - def test_func(): - pass - """) - keywords = item.readkeywords() - assert 'hello' in keywords - - def test_mark_per_class(self, testdir): - modcol = testdir.getmodulecol(""" - import py - class TestClass: - pytestmark = py.test.mark.hello - def test_func(self): - assert TestClass.test_func.hello - """) - clscol = modcol.collect()[0] - item = clscol.collect()[0].collect()[0] - keywords = item.readkeywords() - assert 'hello' in keywords - - def test_merging_markers(self, testdir): - p = testdir.makepyfile(""" - import py - pytestmark = py.test.mark.hello("pos1", x=1, y=2) - class TestClass: - # classlevel overrides module level - pytestmark = py.test.mark.hello(x=3) - @py.test.mark.hello("pos0", z=4) - def test_func(self): - pass - """) - items, rec = testdir.inline_genitems(p) - item, = items - keywords = item.readkeywords() - marker = keywords['hello'] - assert marker._args == ["pos0", "pos1"] - 
assert marker.x == 3 - assert marker.y == 2 - assert marker.z == 4 - - def test_mark_other(self, testdir): - item = testdir.getitem(""" - import py - class pytestmark: - pass - def test_func(): - pass - """) - keywords = item.readkeywords() From commits-noreply at bitbucket.org Tue Oct 27 10:05:17 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 09:05:17 +0000 (UTC) Subject: [py-svn] py-trunk commit f9ae279ff5aa: remove deprecated parser.addgroup usage in favour of getgroup Message-ID: <20091027090517.89C277EF31@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256634191 -3600 # Node ID f9ae279ff5aadd4369ede09806742edd6b2cc10e # Parent 0530b07201676100ca553767972ed537aa8883d3 remove deprecated parser.addgroup usage in favour of getgroup --- a/testing/pytest/test_config.py +++ b/testing/pytest/test_config.py @@ -23,7 +23,7 @@ class TestConfigCmdlineParsing: def test_parser_addoption_default_env(self, testdir, monkeypatch): import os config = testdir.Config() - group = config._parser.addgroup("hello") + group = config._parser.getgroup("hello") monkeypatch.setitem(os.environ, 'PYTEST_OPTION_OPTION1', 'True') group.addoption("--option1", action="store_true") --- a/_py/test/plugin/pytest_restdoc.py +++ b/_py/test/plugin/pytest_restdoc.py @@ -5,7 +5,7 @@ import py import sys def pytest_addoption(parser): - group = parser.addgroup("ReST", "ReST documentation check options") + group = parser.getgroup("ReST", "ReST documentation check options") group.addoption('-R', '--urlcheck', action="store_true", dest="urlcheck", default=False, help="urlopen() remote links found in ReST text files.") --- a/_py/test/plugin/pytest_pylint.py +++ b/_py/test/plugin/pytest_pylint.py @@ -7,7 +7,7 @@ import py pylint = py.test.importorskip("pylint.lint") def pytest_addoption(parser): - group = parser.addgroup('pylint options') + group = parser.getgroup('pylint options') group.addoption('--pylint', action='store_true', default=False, dest='pylint', help='run pylint on python files.') --- a/_py/test/plugin/pytest_figleaf.py +++ b/_py/test/plugin/pytest_figleaf.py @@ -8,7 +8,7 @@ py.test.importorskip("figleaf.annotate_h import figleaf def pytest_addoption(parser): - group = parser.addgroup('figleaf options') + group = parser.getgroup('figleaf options') group.addoption('-F', action='store_true', default=False, dest = 'figleaf', help=('trace python coverage with figleaf and write HTML ' --- a/example/funcarg/urloption/conftest.py +++ b/example/funcarg/urloption/conftest.py @@ -3,7 +3,7 @@ import py def pytest_addoption(parser): - grp = parser.addgroup("testserver options") + grp = parser.getgroup("testserver options") grp.addoption("--url", action="store", default=None, help="url for testserver") --- a/testing/pytest/test_pickling.py +++ b/testing/pytest/test_pickling.py @@ -89,7 +89,7 @@ class TestConfigPickling: def test_config_pickling_customoption(self, testdir): testdir.makeconftest(""" def pytest_addoption(parser): - group = parser.addgroup("testing group") + group = parser.getgroup("testing group") group.addoption('-G', '--glong', action="store", default=42, type="int", dest="gdest", help="g value.") """) @@ -109,7 +109,7 @@ class TestConfigPickling: tmp.ensure("__init__.py") tmp.join("conftest.py").write(py.code.Source(""" def pytest_addoption(parser): - group = parser.addgroup("testing group") + group = parser.getgroup("testing group") group.addoption('-G', 
'--glong', action="store", default=42, type="int", dest="gdest", help="g value.") """)) --- a/_py/test/plugin/pytest_resultlog.py +++ b/_py/test/plugin/pytest_resultlog.py @@ -6,7 +6,7 @@ import py from py.builtin import print_ def pytest_addoption(parser): - group = parser.addgroup("resultlog", "resultlog plugin options") + group = parser.getgroup("resultlog", "resultlog plugin options") group.addoption('--resultlog', action="store", dest="resultlog", metavar="path", default=None, help="path for machine-readable result log.") From commits-noreply at bitbucket.org Tue Oct 27 12:26:19 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 11:26:19 +0000 (UTC) Subject: [py-svn] py-trunk commit 0c477e1fc2ff: fix bug: a false xfail expression would erranonously report XPASS on failures Message-ID: <20091027112619.893887EF34@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256641360 -3600 # Node ID 0c477e1fc2ffc220438af98fcac4c464c50a0bd7 # Parent f9ae279ff5aadd4369ede09806742edd6b2cc10e fix bug: a false xfail expression would erranonously report XPASS on failures --- a/_py/test/plugin/pytest_skipping.py +++ b/_py/test/plugin/pytest_skipping.py @@ -120,18 +120,20 @@ def pytest_runtest_setup(item): def pytest_runtest_makereport(__multicall__, item, call): if call.when != "call": return - if hasattr(item, 'obj'): - expr, result = evalexpression(item, 'xfail') - if result: - rep = __multicall__.execute() - if call.excinfo: - rep.skipped = True - rep.failed = rep.passed = False - else: - rep.skipped = rep.passed = False - rep.failed = True - rep.keywords['xfail'] = True # expr - return rep + expr, result = evalexpression(item, 'xfail') + rep = __multicall__.execute() + if result: + if call.excinfo: + rep.skipped = True + rep.failed = rep.passed = False + else: + rep.skipped = rep.passed = False + rep.failed = True + rep.keywords['xfail'] = expr + else: + if 'xfail' in rep.keywords: + del rep.keywords['xfail'] + return rep # called by terminalreporter progress reporting def pytest_report_teststatus(report): --- a/testing/pytest/plugin/test_pytest_skipping.py +++ b/testing/pytest/plugin/test_pytest_skipping.py @@ -59,6 +59,20 @@ def test_xfail_at_module(testdir): ]) assert result.ret == 0 +def test_xfail_evalfalse_but_fails(testdir): + p = testdir.makepyfile(""" + import py + @py.test.mark.xfail('False') + def test_fail(): + assert 0 + """) + result = testdir.runpytest(p, '--report=xfailed') + extra = result.stdout.fnmatch_lines([ + "*test_xfail_evalfalse_but_fails*:4*", + "*1 failed*" + ]) + assert result.ret == 1 + def test_skipif_decorator(testdir): p = testdir.makepyfile(""" import py From commits-noreply at bitbucket.org Tue Oct 27 16:03:46 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 15:03:46 +0000 (UTC) Subject: [py-svn] py-trunk commit b8da724503b5: fix "py.cleanup -d" - add test and check to only remove empty dirs (!) Message-ID: <20091027150346.056F77EF41@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256655794 -3600 # Node ID b8da724503b52dffac09cc6fa30ab6a71c6f92a2 # Parent 0c477e1fc2ffc220438af98fcac4c464c50a0bd7 fix "py.cleanup -d" - add test and check to only remove empty dirs (!) 
--- a/_py/cmdline/pycleanup.py +++ b/_py/cmdline/pycleanup.py @@ -35,7 +35,8 @@ def main(): if options.removedir: for x in path.visit(lambda x: x.check(dir=1), lambda x: x.check(dotfile=0, link=0)): - remove(x, options) + if not x.listdir(): + remove(x, options) def remove(path, options): if options.dryrun: --- a/testing/cmdline/test_cmdline.py +++ b/testing/cmdline/test_cmdline.py @@ -38,12 +38,14 @@ class TestPyCleanup: result = testdir.runpybin("py.cleanup", tmpdir) assert not pyc.check() - def test_dir_remove(self, testdir, tmpdir): - p = tmpdir.mkdir("a") - result = testdir.runpybin("py.cleanup", tmpdir) + def test_dir_remove_simple(self, testdir, tmpdir): + subdir = tmpdir.mkdir("subdir") + p = subdir.ensure("file") + result = testdir.runpybin("py.cleanup", "-d", tmpdir) assert result.ret == 0 - assert p.check() + assert subdir.check() + p.remove() + p = tmpdir.mkdir("hello") result = testdir.runpybin("py.cleanup", tmpdir, '-d') assert result.ret == 0 - assert not p.check() - + assert not subdir.check() From commits-noreply at bitbucket.org Tue Oct 27 16:23:38 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 15:23:38 +0000 (UTC) Subject: [py-svn] py-virtualenv commit 0530b0720167: removing some py.execnet references and moving scripts to execnet repo Message-ID: <20091027152338.C0F1C7EF37@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-virtualenv # URL http://bitbucket.org/RonnyPfannschmidt/py-virtualenv/overview/ # User holger krekel # Date 1256631563 -3600 # Node ID 0530b07201676100ca553767972ed537aa8883d3 # Parent ded48829fe86317694801bb622a7c2679eb24efd removing some py.execnet references and moving scripts to execnet repo --- a/contrib/sysinfo.py +++ /dev/null @@ -1,139 +0,0 @@ -""" -sysinfo.py [host1] [host2] [options] - -obtain system info from remote machine. 
-""" - -import py -import sys - - -parser = py.std.optparse.OptionParser(usage=__doc__) -parser.add_option("-f", "--sshconfig", action="store", dest="ssh_config", default=None, - help="use given ssh config file, and add info all contained hosts for getting info") -parser.add_option("-i", "--ignore", action="store", dest="ignores", default=None, - help="ignore hosts (useful if the list of hostnames come from a file list)") - -def parsehosts(path): - path = py.path.local(path) - l = [] - rex = py.std.re.compile(r'Host\s*(\S+)') - for line in path.readlines(): - m = rex.match(line) - if m is not None: - sshname, = m.groups() - l.append(sshname) - return l - -class RemoteInfo: - def __init__(self, gateway): - self.gw = gateway - self._cache = {} - - def exreceive(self, execstring): - if execstring not in self._cache: - channel = self.gw.remote_exec(execstring) - self._cache[execstring] = channel.receive() - return self._cache[execstring] - - def getmodattr(self, modpath): - module = modpath.split(".")[0] - return self.exreceive(""" - import %s - channel.send(%s) - """ %(module, modpath)) - - def islinux(self): - return self.getmodattr('sys.platform').find("linux") != -1 - - def getfqdn(self): - return self.exreceive(""" - import socket - channel.send(socket.getfqdn()) - """) - - def getmemswap(self): - if self.islinux(): - return self.exreceive(""" - import commands, re - out = commands.getoutput("free") - mem = re.search(r"Mem:\s+(\S*)", out).group(1) - swap = re.search(r"Swap:\s+(\S*)", out).group(1) - channel.send((mem, swap)) - """) - - def getcpuinfo(self): - if self.islinux(): - return self.exreceive(""" - # a hyperthreaded cpu core only counts as 1, although it - # is present as 2 in /proc/cpuinfo. Counting it as 2 is - # misleading because it is *by far* not as efficient as - # two independent cores. 
- cpus = {} - cpuinfo = {} - f = open("/proc/cpuinfo") - lines = f.readlines() - f.close() - for line in lines + ['']: - if line.strip(): - key, value = line.split(":", 1) - cpuinfo[key.strip()] = value.strip() - else: - corekey = (cpuinfo.get("physical id"), - cpuinfo.get("core id")) - cpus[corekey] = 1 - numcpus = len(cpus) - model = cpuinfo.get("model name") - channel.send((numcpus, model)) - """) - -def debug(*args): - print >>sys.stderr, " ".join(map(str, args)) -def error(*args): - debug("ERROR", args[0] + ":", *args[1:]) - -def getinfo(sshname, ssh_config=None, loginfo=sys.stdout): - debug("connecting to", sshname) - try: - gw = execnet.SshGateway(sshname, ssh_config=ssh_config) - except IOError: - error("could not get sshagteway", sshname) - else: - ri = RemoteInfo(gw) - #print "%s info:" % sshname - prefix = sshname.upper() + " " - print >>loginfo, prefix, "fqdn:", ri.getfqdn() - for attr in ( - "sys.platform", - "sys.version_info", - ): - loginfo.write("%s %s: " %(prefix, attr,)) - loginfo.flush() - value = ri.getmodattr(attr) - loginfo.write(str(value)) - loginfo.write("\n") - loginfo.flush() - memswap = ri.getmemswap() - if memswap: - mem,swap = memswap - print >>loginfo, prefix, "Memory:", mem, "Swap:", swap - cpuinfo = ri.getcpuinfo() - if cpuinfo: - numcpu, model = cpuinfo - print >>loginfo, prefix, "number of cpus:", numcpu - print >>loginfo, prefix, "cpu model", model - return ri - -if __name__ == '__main__': - options, args = parser.parse_args() - hosts = list(args) - ssh_config = options.ssh_config - if ssh_config: - hosts.extend(parsehosts(ssh_config)) - ignores = options.ignores or () - if ignores: - ignores = ignores.split(",") - for host in hosts: - if host not in ignores: - getinfo(host, ssh_config=ssh_config) - --- a/contrib/svn-sync-repo.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python - -""" - -small utility for hot-syncing a svn repository through ssh. -uses execnet. - -""" - -import py -import sys, os - -def usage(): - arg0 = sys.argv[0] - print """%s [user@]remote-host:/repo/location localrepo [identity keyfile]""" % (arg0,) - - -def main(args): - remote = args[0] - localrepo = py.path.local(args[1]) - if not localrepo.check(dir=1): - raise SystemExit("localrepo %s does not exist" %(localrepo,)) - if len(args) == 3: - keyfile = py.path.local(args[2]) - else: - keyfile = None - remote_host, path = remote.split(':', 1) - print "ssh-connecting to", remote_host - gw = getgateway(remote_host, keyfile) - - local_rev = get_svn_youngest(localrepo) - - # local protocol - # 1. client sends rev/repo -> server - # 2. server checks for newer revisions and sends dumps - # 3. client receives dumps, updates local repo - # 4. 
client goes back to step 1 - c = gw.remote_exec(""" - import py - import os - remote_rev, repopath = channel.receive() - while 1: - rev = py.process.cmdexec('svnlook youngest "%s"' % repopath) - rev = int(rev) - if rev > remote_rev: - revrange = (remote_rev+1, rev) - dumpchannel = channel.gateway.newchannel() - channel.send(revrange) - channel.send(dumpchannel) - - f = os.popen( - "svnadmin dump -q --incremental -r %s:%s %s" - % (revrange[0], revrange[1], repopath), 'r') - try: - maxcount = dumpchannel.receive() - count = maxcount - while 1: - s = f.read(8192) - if not s: - raise EOFError - dumpchannel.send(s) - count = count - 1 - if count <= 0: - ack = dumpchannel.receive() - count = maxcount - - except EOFError: - dumpchannel.close() - remote_rev = rev - else: - # using svn-hook instead would be nice here - py.std.time.sleep(30) - """) - - c.send((local_rev, path)) - print "checking revisions from %d in %s" %(local_rev, remote) - while 1: - revstart, revend = c.receive() - dumpchannel = c.receive() - print "receiving revisions", revstart, "-", revend, "replaying..." - svn_load(localrepo, dumpchannel) - print "current revision", revend - -def svn_load(repo, dumpchannel, maxcount=100): - # every maxcount we will send an ACK to the other - # side in order to synchronise and avoid our side - # growing buffers (py.execnet does not control - # RAM usage or receive queue sizes) - dumpchannel.send(maxcount) - f = os.popen("svnadmin load -q %s" %(repo, ), "w") - count = maxcount - for x in dumpchannel: - sys.stdout.write(".") - sys.stdout.flush() - f.write(x) - count = count - 1 - if count <= 0: - dumpchannel.send(maxcount) - count = maxcount - print >>sys.stdout - f.close() - -def get_svn_youngest(repo): - rev = py.process.cmdexec('svnlook youngest "%s"' % repo) - return int(rev) - -def getgateway(host, keyfile=None): - return execnet.SshGateway(host, identity=keyfile) - -if __name__ == '__main__': - if len(sys.argv) < 3: - usage() - raise SystemExit(1) - - main(sys.argv[1:]) - --- a/README.txt +++ b/README.txt @@ -2,7 +2,6 @@ The py lib is a Python development suppo the following tools and modules: * py.test: tool for distributed automated testing -* py.execnet: ad-hoc distributed execution * py.code: dynamic code generation and introspection * py.path: uniform local and svn path objects --- a/doc/confrest.py +++ b/doc/confrest.py @@ -73,7 +73,6 @@ pageTracker._trackPageview(); html.div( html.h3("supporting APIs:"), self.a_docref("pylib index", "index.html"), - self.a_docref("py.execnet", "execnet.html"), self.a_docref("py.path", "path.html"), self.a_docref("py.code", "code.html"), ) From commits-noreply at bitbucket.org Tue Oct 27 16:23:38 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 15:23:38 +0000 (UTC) Subject: [py-svn] py-virtualenv commit ded48829fe86: refine naming, API and docs for py.test.mark mechanism - now contained in pytest_mark plugin Message-ID: <20091027152338.AACBC7EF08@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-virtualenv # URL http://bitbucket.org/RonnyPfannschmidt/py-virtualenv/overview/ # User holger krekel # Date 1256237841 -7200 # Node ID ded48829fe86317694801bb622a7c2679eb24efd # Parent 57132bbe20c575c34de83f196f1006cceaa594b8 refine naming, API and docs for py.test.mark mechanism - now contained in pytest_mark plugin --- /dev/null +++ b/_py/test/plugin/pytest_mark.py @@ -0,0 +1,137 @@ +""" +generic mechanism for marking python functions. 
+ +By using the ``py.test.mark`` helper you can instantiate +decorators that will set named meta data on test functions. + +Marking a single function +---------------------------------------------------- + +You can "mark" a test function with meta data like this:: + + @py.test.mark.webtest + def test_send_http(): + ... + +This will set a "Marker" instance as a function attribute named "webtest". +You can also specify parametrized meta data like this:: + + @py.test.mark.webtest(firefox=30) + def test_receive(): + ... + +The named marker can be accessed like this later:: + + test_receive.webtest.kwargs['firefox'] == 30 + +In addition to set key-value pairs you can also use positional arguments:: + + @py.test.mark.webtest("triangular") + def test_receive(): + ... + +and later access it with ``test_receive.webtest.args[0] == 'triangular``. + +.. _`scoped-marking`: + +Marking classes or modules +---------------------------------------------------- + +To mark all methods of a class set a ``pytestmark`` attribute like this:: + + import py + + class TestClass: + pytestmark = py.test.mark.webtest + +You can re-use the same markers that you would use for decorating +a function - in fact this marker decorator will be applied +to all test methods of the class. + +You can also set a module level marker:: + + import py + pytestmark = py.test.mark.webtest + +in which case then the marker decorator will be applied to all functions and +methods defined in the module. + +The order in which marker functions are called is this:: + + per-function (upon import of module already) + per-class + per-module + +Later called markers may overwrite previous key-value settings. +Positional arguments are all appended to the same 'args' list +of the Marker object. +""" +import py + +def pytest_namespace(): + return {'mark': Mark()} + + +class Mark(object): + def __getattr__(self, name): + if name[0] == "_": + raise AttributeError(name) + return MarkerDecorator(name) + +class MarkerDecorator: + """ decorator for setting function attributes. 
""" + def __init__(self, name): + self.markname = name + self.kwargs = {} + self.args = [] + + def __repr__(self): + d = self.__dict__.copy() + name = d.pop('markname') + return "" %(name, d) + + def __call__(self, *args, **kwargs): + if args: + if len(args) == 1 and hasattr(args[0], '__call__'): + func = args[0] + holder = getattr(func, self.markname, None) + if holder is None: + holder = Marker(self.markname, self.args, self.kwargs) + setattr(func, self.markname, holder) + else: + holder.kwargs.update(self.kwargs) + holder.args.extend(self.args) + return func + else: + self.args.extend(args) + self.kwargs.update(kwargs) + return self + +class Marker: + def __init__(self, name, args, kwargs): + self._name = name + self.args = args + self.kwargs = kwargs + + def __getattr__(self, name): + if name[0] != '_' and name in self.kwargs: + py.log._apiwarn("1.1", "use .kwargs attribute to access key-values") + return self.kwargs[name] + raise AttributeError(name) + + def __repr__(self): + return "" % ( + self._name, self.args, self.kwargs) + + +def pytest_pycollect_makeitem(__multicall__, collector, name, obj): + item = __multicall__.execute() + if isinstance(item, py.test.collect.Function): + cls = collector.getparent(py.test.collect.Class) + mod = collector.getparent(py.test.collect.Module) + func = getattr(item.obj, 'im_func', item.obj) + for parent in [x for x in (mod, cls) if x]: + marker = getattr(parent.obj, 'pytestmark', None) + if isinstance(marker, MarkerDecorator): + marker(func) + return item --- a/doc/test/plugin/links.txt +++ b/doc/test/plugin/links.txt @@ -3,7 +3,6 @@ .. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_recwarn.py .. _`unittest`: unittest.html .. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_monkeypatch.py -.. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_keyword.py .. _`pastebin`: pastebin.html .. _`skipping`: skipping.html .. _`plugins`: index.html @@ -13,6 +12,7 @@ .. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_restdoc.py .. _`restdoc`: restdoc.html .. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_pastebin.py +.. _`mark`: mark.html .. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_figleaf.py .. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_hooklog.py .. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_skipping.py @@ -20,6 +20,7 @@ .. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_helpconfig.py .. _`oejskit`: oejskit.html .. _`doctest`: doctest.html +.. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_mark.py .. _`get in contact`: ../../contact.html .. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_capture.py .. _`figleaf`: figleaf.html @@ -30,7 +31,6 @@ .. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_pdb.py .. _`monkeypatch`: monkeypatch.html .. _`resultlog`: resultlog.html -.. _`keyword`: keyword.html .. _`django`: django.html .. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_unittest.py .. 
_`nose`: nose.html --- a/conftest.py +++ b/conftest.py @@ -52,7 +52,7 @@ def pytest_generate_tests(metafunc): multi = getattr(metafunc.function, 'multi', None) if multi is None: return - assert len(multi._kwargs) == 1 - for name, l in multi._kwargs.items(): + assert len(multi.kwargs) == 1 + for name, l in multi.kwargs.items(): for val in l: metafunc.addcall(funcargs={name: val}) --- a/bin-for-dist/makepluginlist.py +++ b/bin-for-dist/makepluginlist.py @@ -13,7 +13,7 @@ plugins = [ ('plugins for generic reporting and failure logging', 'pastebin resultlog terminal',), ('misc plugins / core functionality', - 'helpconfig pdb keyword hooklog') + 'helpconfig pdb mark hooklog') #('internal plugins / core functionality', # #'pdb keyword hooklog runner execnetcleanup # pytester', # 'pdb keyword hooklog runner execnetcleanup' # pytester', --- a/doc/test/features.txt +++ b/doc/test/features.txt @@ -200,7 +200,7 @@ kewords like this:: and then use those keywords to select tests. See the `pytest_keyword`_ plugin for more information. -.. _`pytest_keyword`: plugin/keyword.html +.. _`pytest_keyword`: plugin/mark.html easy to extend ========================================= --- a/doc/test/plugin/index.txt +++ b/doc/test/plugin/index.txt @@ -2,7 +2,7 @@ plugins for Python test functions ================================= -skipping_ advanced conditional skipping for python test functions, classes or modules. +skipping_ advanced skipping for python test functions, classes or modules. figleaf_ write and report coverage data with 'figleaf'. @@ -56,7 +56,7 @@ helpconfig_ provide version info, confte pdb_ interactive debugging with the Python Debugger. -keyword_ mark test functions with keywords that may hold values. +mark_ generic mechanism for marking python functions. hooklog_ log invocations of extension hooks to a file. --- a/doc/test/plugin/skipping.txt +++ b/doc/test/plugin/skipping.txt @@ -2,32 +2,39 @@ pytest_skipping plugin ====================== -advanced conditional skipping for python test functions, classes or modules. +advanced skipping for python test functions, classes or modules. .. contents:: :local: -You can mark functions, classes or modules for for conditional -skipping (skipif) or as expected-to-fail (xfail). The difference -between the two is that 'xfail' will still execute test functions -but it will invert the outcome: a passing test becomes a failure and -a failing test is a semi-passing one. All skip conditions are -reported at the end of test run through the terminal reporter. +With this plugin you can mark test functions for conditional skipping +or as "xfail", expected-to-fail. Skipping a test will avoid running it +while xfail-marked tests will run and result in an inverted outcome: +a pass becomes a failure and a fail becomes a semi-passing one. + +The need for skipping a test is usually connected to a condition. +If a test fails under all conditions then it's probably better +to mark your test as 'xfail'. + +By passing ``--report=xfailed,skipped`` to the terminal reporter +you will see summary information on skips and xfail-run tests +at the end of a test run. .. _skipif: -skip a test function conditionally +mark a test function to be skipped ------------------------------------------- -Here is an example for skipping a test function on Python3:: +Here is an example for skipping a test function when +running on Python3:: @py.test.mark.skipif("sys.version_info >= (3,0)") def test_function(): ... -The 'skipif' marker accepts an **arbitrary python expression** -as a condition. 
When setting up the test function the condition -is evaluated by calling ``eval(expr, namespace)``. The namespace + +During test function setup the skipif condition is +evaluated by calling ``eval(expr, namespace)``. The namespace contains the ``sys`` and ``os`` modules as well as the test ``config`` object. The latter allows you to skip based on a test configuration value e.g. like this:: @@ -37,71 +44,74 @@ on a test configuration value e.g. like ... -conditionally mark a function as "expected to fail" +mark many test functions at once +-------------------------------------- + +As with all metadata function marking you can do it at +`whole class- or module level`_. Here is an example +for skipping all methods of a test class based on platform:: + + class TestPosixCalls: + pytestmark = py.test.mark.skipif("sys.platform == 'win32'") + + def test_function(self): + # will not be setup or run under 'win32' platform + # + + +.. _`whole class- or module level`: mark.html#scoped-marking + + +mark a test function as expected to fail ------------------------------------------------------- -You can use the ``xfail`` keyword to mark your test functions as -'expected to fail':: +You can use the ``xfail`` marker to indicate that you +expect the test to fail:: @py.test.mark.xfail - def test_hello(): - ... - -This test will be executed but no traceback will be reported -when it fails. Instead terminal reporting will list it in the -"expected to fail" or "unexpectedly passing" sections. -As with skipif_ you may selectively expect a failure -depending on platform:: - - @py.test.mark.xfail("sys.version_info >= (3,0)") def test_function(): ... -skip/xfail a whole test class or module -------------------------------------------- +This test will be run but no traceback will be reported +when it fails. Instead terminal reporting will list it in the +"expected to fail" or "unexpectedly passing" sections. -Instead of marking single functions you can skip -a whole class of tests when running on a specific -platform:: +Same as with skipif_ you can also selectively expect a failure +depending on platform:: - class TestSomething: - skipif = "sys.platform == 'win32'" + @py.test.mark.xfail(if"sys.version_info >= (3,0)") -Or you can mark all test functions as expected -to fail for a specific test configuration:: + def test_function(): + ... - xfail = "config.getvalue('db') == 'mysql'" +skipping on a missing import dependency +-------------------------------------------------- -skip if a dependency cannot be imported ---------------------------------------------- - -You can use a helper to skip on a failing import:: +You can use the following import helper at module level +or within a test or setup function. docutils = py.test.importorskip("docutils") -You can use this helper at module level or within -a test or setup function. - -You can also skip if a library does not come with a high enough version:: +If ``docutils`` cannot be imported here, this will lead to a +skip outcome of the test. You can also skip dependeing if +if a library does not come with a high enough version:: docutils = py.test.importorskip("docutils", minversion="0.3") The version will be read from the specified module's ``__version__`` attribute. 
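A self-contained sketch of how the two helpers just described are typically combined at module level (``docutils`` is only the illustrative dependency used in this text; substitute whatever your tests really need)::

    import py

    # skip every test in this module when docutils is missing or
    # older than 0.3; on success importorskip returns the module
    docutils = py.test.importorskip("docutils", minversion="0.3")

    def test_docutils_was_imported():
        # only runs if the import above succeeded
        assert hasattr(docutils, "__version__")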
-dynamically skip from within a test or setup -------------------------------------------------- +imperative skip from within a test or setup function +------------------------------------------------------ -If you want to skip the execution of a test you can call -``py.test.skip()`` within a test, a setup or from a -`funcarg factory`_ function. Example:: +If for some reason you cannot declare skip-conditions +you can also imperatively produce a Skip-outcome from +within test or setup code. Example:: def test_function(): if not valid_config(): py.test.skip("unsuppored configuration") -.. _`funcarg factory`: ../funcargs.html#factory - Start improving this plugin in 30 seconds ========================================= --- /dev/null +++ b/doc/test/plugin/mark.txt @@ -0,0 +1,85 @@ + +pytest_mark plugin +================== + +generic mechanism for marking python functions. + +.. contents:: + :local: + +By using the ``py.test.mark`` helper you can instantiate +decorators that will set named meta data on test functions. + +Marking a single function +---------------------------------------------------- + +You can "mark" a test function with meta data like this:: + + @py.test.mark.webtest + def test_send_http(): + ... + +This will set a "Marker" instance as a function attribute named "webtest". +You can also specify parametrized meta data like this:: + + @py.test.mark.webtest(firefox=30) + def test_receive(): + ... + +The named marker can be accessed like this later:: + + test_receive.webtest.kwargs['firefox'] == 30 + +In addition to set key-value pairs you can also use positional arguments:: + + @py.test.mark.webtest("triangular") + def test_receive(): + ... + +and later access it with ``test_receive.webtest.args[0] == 'triangular``. + +.. _`scoped-marking`: + +Marking classes or modules +---------------------------------------------------- + +To mark all methods of a class set a ``pytestmark`` attribute like this:: + + import py + + class TestClass: + pytestmark = py.test.mark.webtest + +You can re-use the same markers that you would use for decorating +a function - in fact this marker decorator will be applied +to all test methods of the class. + +You can also set a module level marker:: + + import py + pytestmark = py.test.mark.webtest + +in which case then the marker decorator will be applied to all functions and +methods defined in the module. + +The order in which marker functions are called is this:: + + per-function (upon import of module already) + per-class + per-module + +Later called markers may overwrite previous key-value settings. +Positional arguments are all appended to the same 'args' list +of the Marker object. + +Start improving this plugin in 30 seconds +========================================= + + +1. Download `pytest_mark.py`_ plugin source code +2. put it somewhere as ``pytest_mark.py`` into your import path +3. a subsequent ``py.test`` run will use your local version + +Checkout customize_, other plugins_ or `get in contact`_. + +.. include:: links.txt --- a/_py/test/plugin/pytest_keyword.py +++ /dev/null @@ -1,122 +0,0 @@ -""" -mark test functions with keywords that may hold values. - -Marking functions by a decorator ----------------------------------------------------- - -By default, all filename parts and class/function names of a test -function are put into the set of keywords for a given test. You can -specify additional kewords like this:: - - @py.test.mark.webtest - def test_send_http(): - ... 
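To make the ``args``/``kwargs`` access pattern documented above concrete, here is a minimal sketch using an invented ``webtest`` marker name::

    import py

    @py.test.mark.webtest("triangular", firefox=30)
    def test_receive():
        pass

    # the decorator stored a Marker object on the function
    assert test_receive.webtest.args[0] == "triangular"
    assert test_receive.webtest.kwargs["firefox"] == 30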
- -This will set an attribute 'webtest' to True on the given test function. -You can read the value 'webtest' from the functions __dict__ later. - -You can also set values for an attribute which are put on an empty -dummy object:: - - @py.test.mark.webtest(firefox=30) - def test_receive(): - ... - -after which ``test_receive.webtest.firefox == 30`` holds true. - -In addition to keyword arguments you can also use positional arguments:: - - @py.test.mark.webtest("triangular") - def test_receive(): - ... - -after which ``test_receive.webtest._args[0] == 'triangular`` holds true. - - -.. _`scoped-marking`: - -Marking classes or modules ----------------------------------------------------- - -To mark all methods of a class you can set a class-level attribute:: - - class TestClass: - pytestmark = py.test.mark.webtest - -the marker function will be applied to all test methods. - -If you set a marker it inside a test module like this:: - - pytestmark = py.test.mark.webtest - -the marker will be applied to all functions and methods of -that module. The module marker is applied last. - -Outer ``pytestmark`` keywords will overwrite inner keyword -values. Positional arguments are all appeneded to the -same '_args' list. -""" -import py - -def pytest_namespace(): - return {'mark': Mark()} - - -class Mark(object): - def __getattr__(self, name): - if name[0] == "_": - raise AttributeError(name) - return MarkerDecorator(name) - -class MarkerDecorator: - """ decorator for setting function attributes. """ - def __init__(self, name): - self.markname = name - self.kwargs = {} - self.args = [] - - def __repr__(self): - d = self.__dict__.copy() - name = d.pop('markname') - return "" %(name, d) - - def __call__(self, *args, **kwargs): - if args: - if len(args) == 1 and hasattr(args[0], '__call__'): - func = args[0] - holder = getattr(func, self.markname, None) - if holder is None: - holder = MarkHolder(self.markname, self.args, self.kwargs) - setattr(func, self.markname, holder) - else: - holder.__dict__.update(self.kwargs) - holder._args.extend(self.args) - return func - else: - self.args.extend(args) - self.kwargs.update(kwargs) - return self - -class MarkHolder: - def __init__(self, name, args, kwargs): - self._name = name - self._args = args - self._kwargs = kwargs - self.__dict__.update(kwargs) - - def __repr__(self): - return "" % ( - self._name, self._args, self._kwargs) - - -def pytest_pycollect_makeitem(__multicall__, collector, name, obj): - item = __multicall__.execute() - if isinstance(item, py.test.collect.Function): - cls = collector.getparent(py.test.collect.Class) - mod = collector.getparent(py.test.collect.Module) - func = getattr(item.obj, 'im_func', item.obj) - for parent in [x for x in (mod, cls) if x]: - marker = getattr(parent.obj, 'pytestmark', None) - if isinstance(marker, MarkerDecorator): - marker(func) - return item --- /dev/null +++ b/testing/pytest/plugin/test_pytest_mark.py @@ -0,0 +1,110 @@ +import py +from _py.test.plugin.pytest_mark import Mark + +class TestMark: + def test_pytest_mark_notcallable(self): + mark = Mark() + py.test.raises(TypeError, "mark()") + + def test_pytest_mark_bare(self): + mark = Mark() + def f(): pass + mark.hello(f) + assert f.hello + + def test_pytest_mark_keywords(self): + mark = Mark() + def f(): pass + mark.world(x=3, y=4)(f) + assert f.world + assert f.world.x == 3 + assert f.world.y == 4 + + def test_apply_multiple_and_merge(self): + mark = Mark() + def f(): pass + marker = mark.world + mark.world(x=3)(f) + assert f.world.x == 3 + mark.world(y=4)(f) 
+ assert f.world.x == 3 + assert f.world.y == 4 + mark.world(y=1)(f) + assert f.world.y == 1 + assert len(f.world.args) == 0 + + def test_pytest_mark_positional(self): + mark = Mark() + def f(): pass + mark.world("hello")(f) + assert f.world.args[0] == "hello" + mark.world("world")(f) + + def test_oldstyle_marker_access(self, recwarn): + mark = Mark() + def f(): pass + mark.world(x=1)(f) + assert f.world.x == 1 + assert recwarn.pop() + +class TestFunctional: + def test_mark_per_function(self, testdir): + p = testdir.makepyfile(""" + import py + @py.test.mark.hello + def test_hello(): + assert hasattr(test_hello, 'hello') + """) + result = testdir.runpytest(p) + assert result.stdout.fnmatch_lines(["*passed*"]) + + def test_mark_per_module(self, testdir): + item = testdir.getitem(""" + import py + pytestmark = py.test.mark.hello + def test_func(): + pass + """) + keywords = item.readkeywords() + assert 'hello' in keywords + + def test_mark_per_class(self, testdir): + modcol = testdir.getmodulecol(""" + import py + class TestClass: + pytestmark = py.test.mark.hello + def test_func(self): + assert TestClass.test_func.hello + """) + clscol = modcol.collect()[0] + item = clscol.collect()[0].collect()[0] + keywords = item.readkeywords() + assert 'hello' in keywords + + def test_merging_markers(self, testdir): + p = testdir.makepyfile(""" + import py + pytestmark = py.test.mark.hello("pos1", x=1, y=2) + class TestClass: + # classlevel overrides module level + pytestmark = py.test.mark.hello(x=3) + @py.test.mark.hello("pos0", z=4) + def test_func(self): + pass + """) + items, rec = testdir.inline_genitems(p) + item, = items + keywords = item.readkeywords() + marker = keywords['hello'] + assert marker.args == ["pos0", "pos1"] + assert marker.kwargs == {'x': 3, 'y': 2, 'z': 4} + + def test_mark_other(self, testdir): + item = testdir.getitem(""" + import py + class pytestmark: + pass + def test_func(): + pass + """) + keywords = item.readkeywords() --- a/doc/test/plugin/keyword.txt +++ /dev/null @@ -1,51 +0,0 @@ - -pytest_keyword plugin -===================== - -mark test functions with keywords that may hold values. - -.. contents:: - :local: - -Marking functions and setting rich attributes ----------------------------------------------------- - -By default, all filename parts and class/function names of a test -function are put into the set of keywords for a given test. You can -specify additional kewords like this:: - - @py.test.mark.webtest - def test_send_http(): - ... - -This will set an attribute 'webtest' to True on the given test function. -You can read the value 'webtest' from the functions __dict__ later. - -You can also set values for an attribute which are put on an empty -dummy object:: - - @py.test.mark.webtest(firefox=30) - def test_receive(): - ... - -after which ``test_receive.webtest.firefox == 30`` holds true. - -In addition to keyword arguments you can also use positional arguments:: - - @py.test.mark.webtest("triangular") - def test_receive(): - ... - -after which ``test_receive.webtest._1 == 'triangular`` hold true. - -Start improving this plugin in 30 seconds -========================================= - - -1. Download `pytest_keyword.py`_ plugin source code -2. put it somewhere as ``pytest_keyword.py`` into your import path -3. a subsequent ``py.test`` run will use your local version - -Checkout customize_, other plugins_ or `get in contact`_. - -.. 
include:: links.txt --- a/doc/test/plugin/terminal.txt +++ b/doc/test/plugin/terminal.txt @@ -13,16 +13,24 @@ command line options -------------------- +``-v, --verbose`` + increase verbosity. +``-l, --showlocals`` + show locals in tracebacks (disabled by default). +``--report=opts`` + comma separated reporting options +``--tb=style`` + traceback verboseness (long/short/no). +``--fulltrace`` + don't cut any tracebacks (default is to cut). ``--collectonly`` only collect tests, don't execute them. ``--traceconfig`` trace considerations of conftest.py files. ``--nomagic`` don't reinterpret asserts, no traceback cutting. -``--fulltrace`` - don't cut any tracebacks (default is to cut). ``--debug`` - generate and show debugging information. + generate and show internal debugging information. Start improving this plugin in 30 seconds ========================================= --- a/doc/test/plugin/capture.txt +++ b/doc/test/plugin/capture.txt @@ -113,10 +113,10 @@ command line options -------------------- +``--capture=method`` + set capturing method during tests: fd (default)|sys|no. ``-s`` shortcut for --capture=no. -``--capture=method`` - set capturing method during tests: fd (default)|sys|no. Start improving this plugin in 30 seconds ========================================= --- a/_py/test/plugin/pytest_skipping.py +++ b/_py/test/plugin/pytest_skipping.py @@ -1,16 +1,22 @@ """ -advanced conditional skipping for python test functions, classes or modules. +advanced skipping for python test functions, classes or modules. -You can mark functions, classes or modules for for conditional -skipping (skipif) or as expected-to-fail (xfail). The difference -between the two is that 'xfail' will still execute test functions -but it will invert the outcome: a passing test becomes a failure and -a failing test is a semi-passing one. All skip conditions are -reported at the end of test run through the terminal reporter. +With this plugin you can mark test functions for conditional skipping +or as "xfail", expected-to-fail. Skipping a test will avoid running it +while xfail-marked tests will run and result in an inverted outcome: +a pass becomes a failure and a fail becomes a semi-passing one. + +The need for skipping a test is usually connected to a condition. +If a test fails under all conditions then it's probably better +to mark your test as 'xfail'. + +By passing ``--report=xfailed,skipped`` to the terminal reporter +you will see summary information on skips and xfail-run tests +at the end of a test run. .. _skipif: -skip a test function conditionally +mark a test function to be skipped ------------------------------------------- Here is an example for skipping a test function when @@ -20,6 +26,7 @@ running on Python3:: def test_function(): ... + During test function setup the skipif condition is evaluated by calling ``eval(expr, namespace)``. The namespace contains the ``sys`` and ``os`` modules as well as the @@ -30,76 +37,75 @@ on a test configuration value e.g. like def test_function(...): ... -Note that `test marking can be declared at whole class- or module level`_. -.. _`test marking can also be declared at whole class- or module level`: keyword.html#scoped-marking +mark many test functions at once +-------------------------------------- +As with all metadata function marking you can do it at +`whole class- or module level`_. 
Here is an example +for skipping all methods of a test class based on platform:: -conditionally mark a function as "expected to fail" + class TestPosixCalls: + pytestmark = py.test.mark.skipif("sys.platform == 'win32'") + + def test_function(self): + # will not be setup or run under 'win32' platform + # + + +.. _`whole class- or module level`: mark.html#scoped-marking + + +mark a test function as expected to fail ------------------------------------------------------- -You can use the ``xfail`` keyword to mark your test functions as -'expected to fail':: +You can use the ``xfail`` marker to indicate that you +expect the test to fail:: @py.test.mark.xfail - def test_hello(): - ... - -This test will be executed but no traceback will be reported -when it fails. Instead terminal reporting will list it in the -"expected to fail" or "unexpectedly passing" sections. -As with skipif_ you may selectively expect a failure -depending on platform:: - - @py.test.mark.xfail("sys.version_info >= (3,0)") def test_function(): ... -skip/xfail a whole test class or module -------------------------------------------- +This test will be run but no traceback will be reported +when it fails. Instead terminal reporting will list it in the +"expected to fail" or "unexpectedly passing" sections. -Instead of marking single functions you can skip -a whole class of tests when running on a specific -platform:: +Same as with skipif_ you can also selectively expect a failure +depending on platform:: - class TestSomething: - skipif = "sys.platform == 'win32'" + @py.test.mark.xfail(if"sys.version_info >= (3,0)") -Or you can mark all test functions as expected -to fail for a specific test configuration:: + def test_function(): + ... - xfail = "config.getvalue('db') == 'mysql'" +skipping on a missing import dependency +-------------------------------------------------- -skip if a dependency cannot be imported ---------------------------------------------- - -You can use a helper to skip on a failing import:: +You can use the following import helper at module level +or within a test or setup function. docutils = py.test.importorskip("docutils") -You can use this helper at module level or within -a test or setup function. - -You can also skip if a library does not come with a high enough version:: +If ``docutils`` cannot be imported here, this will lead to a +skip outcome of the test. You can also skip dependeing if +if a library does not come with a high enough version:: docutils = py.test.importorskip("docutils", minversion="0.3") The version will be read from the specified module's ``__version__`` attribute. -dynamically skip from within a test or setup -------------------------------------------------- +imperative skip from within a test or setup function +------------------------------------------------------ -If you want to skip the execution of a test you can call -``py.test.skip()`` within a test, a setup or from a -`funcarg factory`_ function. Example:: +If for some reason you cannot declare skip-conditions +you can also imperatively produce a Skip-outcome from +within test or setup code. Example:: def test_function(): if not valid_config(): py.test.skip("unsuppored configuration") -.. 
_`funcarg factory`: ../funcargs.html#factory - """ # XXX py.test.skip, .importorskip and the Skipped class # should also be defined in this plugin, requires thought/changes @@ -177,7 +183,7 @@ def evalexpression(item, keyword): if markholder: d = {'os': py.std.os, 'sys': py.std.sys, 'config': item.config} expr, result = None, True - for expr in markholder._args: + for expr in markholder.args: if isinstance(expr, str): result = eval(expr, d) else: --- a/doc/changelog.txt +++ b/doc/changelog.txt @@ -1,6 +1,8 @@ Changes between 1.0.2 and '1.1.0b1' ===================================== +* introduce generalized py.test.mark function marking + * reshuffle / refine command line grouping * deprecate parser.addgroup in favour of getgroup which creates option group --- a/_py/test/defaultconftest.py +++ b/_py/test/defaultconftest.py @@ -10,5 +10,5 @@ Generator = py.test.collect.Generator Function = py.test.collect.Function Instance = py.test.collect.Instance -pytest_plugins = "default runner capture terminal keyword skipping tmpdir monkeypatch recwarn pdb pastebin unittest helpconfig nose assertion".split() +pytest_plugins = "default runner capture terminal mark skipping tmpdir monkeypatch recwarn pdb pastebin unittest helpconfig nose assertion".split() --- a/testing/pytest/plugin/test_pytest_keyword.py +++ /dev/null @@ -1,105 +0,0 @@ -import py -from _py.test.plugin.pytest_keyword import Mark - -class TestMark: - def test_pytest_mark_notcallable(self): - mark = Mark() - py.test.raises(TypeError, "mark()") - - def test_pytest_mark_bare(self): - mark = Mark() - def f(): pass - mark.hello(f) - assert f.hello - - def test_pytest_mark_keywords(self): - mark = Mark() - def f(): pass - mark.world(x=3, y=4)(f) - assert f.world - assert f.world.x == 3 - assert f.world.y == 4 - - def test_apply_multiple_and_merge(self): - mark = Mark() - def f(): pass - marker = mark.world - mark.world(x=3)(f) - assert f.world.x == 3 - mark.world(y=4)(f) - assert f.world.x == 3 - assert f.world.y == 4 - mark.world(y=1)(f) - assert f.world.y == 1 - assert len(f.world._args) == 0 - - def test_pytest_mark_positional(self): - mark = Mark() - def f(): pass - mark.world("hello")(f) - assert f.world._args[0] == "hello" - mark.world("world")(f) - -class TestFunctional: - def test_mark_per_function(self, testdir): - p = testdir.makepyfile(""" - import py - @py.test.mark.hello - def test_hello(): - assert hasattr(test_hello, 'hello') - """) - result = testdir.runpytest(p) - assert result.stdout.fnmatch_lines(["*passed*"]) - - def test_mark_per_module(self, testdir): - item = testdir.getitem(""" - import py - pytestmark = py.test.mark.hello - def test_func(): - pass - """) - keywords = item.readkeywords() - assert 'hello' in keywords - - def test_mark_per_class(self, testdir): - modcol = testdir.getmodulecol(""" - import py - class TestClass: - pytestmark = py.test.mark.hello - def test_func(self): - assert TestClass.test_func.hello - """) - clscol = modcol.collect()[0] - item = clscol.collect()[0].collect()[0] - keywords = item.readkeywords() - assert 'hello' in keywords - - def test_merging_markers(self, testdir): - p = testdir.makepyfile(""" - import py - pytestmark = py.test.mark.hello("pos1", x=1, y=2) - class TestClass: - # classlevel overrides module level - pytestmark = py.test.mark.hello(x=3) - @py.test.mark.hello("pos0", z=4) - def test_func(self): - pass - """) - items, rec = testdir.inline_genitems(p) - item, = items - keywords = item.readkeywords() - marker = keywords['hello'] - assert marker._args == ["pos0", "pos1"] - 
assert marker.x == 3 - assert marker.y == 2 - assert marker.z == 4 - - def test_mark_other(self, testdir): - item = testdir.getitem(""" - import py - class pytestmark: - pass - def test_func(): - pass - """) - keywords = item.readkeywords() From commits-noreply at bitbucket.org Tue Oct 27 16:23:40 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 15:23:40 +0000 (UTC) Subject: [py-svn] py-virtualenv commit f9ae279ff5aa: remove deprecated parser.addgroup usage in favour of getgroup Message-ID: <20091027152340.597D77EF44@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-virtualenv # URL http://bitbucket.org/RonnyPfannschmidt/py-virtualenv/overview/ # User holger krekel # Date 1256634191 -3600 # Node ID f9ae279ff5aadd4369ede09806742edd6b2cc10e # Parent 0530b07201676100ca553767972ed537aa8883d3 remove deprecated parser.addgroup usage in favour of getgroup --- a/testing/pytest/test_config.py +++ b/testing/pytest/test_config.py @@ -23,7 +23,7 @@ class TestConfigCmdlineParsing: def test_parser_addoption_default_env(self, testdir, monkeypatch): import os config = testdir.Config() - group = config._parser.addgroup("hello") + group = config._parser.getgroup("hello") monkeypatch.setitem(os.environ, 'PYTEST_OPTION_OPTION1', 'True') group.addoption("--option1", action="store_true") --- a/_py/test/plugin/pytest_restdoc.py +++ b/_py/test/plugin/pytest_restdoc.py @@ -5,7 +5,7 @@ import py import sys def pytest_addoption(parser): - group = parser.addgroup("ReST", "ReST documentation check options") + group = parser.getgroup("ReST", "ReST documentation check options") group.addoption('-R', '--urlcheck', action="store_true", dest="urlcheck", default=False, help="urlopen() remote links found in ReST text files.") --- a/_py/test/plugin/pytest_pylint.py +++ b/_py/test/plugin/pytest_pylint.py @@ -7,7 +7,7 @@ import py pylint = py.test.importorskip("pylint.lint") def pytest_addoption(parser): - group = parser.addgroup('pylint options') + group = parser.getgroup('pylint options') group.addoption('--pylint', action='store_true', default=False, dest='pylint', help='run pylint on python files.') --- a/_py/test/plugin/pytest_figleaf.py +++ b/_py/test/plugin/pytest_figleaf.py @@ -8,7 +8,7 @@ py.test.importorskip("figleaf.annotate_h import figleaf def pytest_addoption(parser): - group = parser.addgroup('figleaf options') + group = parser.getgroup('figleaf options') group.addoption('-F', action='store_true', default=False, dest = 'figleaf', help=('trace python coverage with figleaf and write HTML ' --- a/example/funcarg/urloption/conftest.py +++ b/example/funcarg/urloption/conftest.py @@ -3,7 +3,7 @@ import py def pytest_addoption(parser): - grp = parser.addgroup("testserver options") + grp = parser.getgroup("testserver options") grp.addoption("--url", action="store", default=None, help="url for testserver") --- a/testing/pytest/test_pickling.py +++ b/testing/pytest/test_pickling.py @@ -89,7 +89,7 @@ class TestConfigPickling: def test_config_pickling_customoption(self, testdir): testdir.makeconftest(""" def pytest_addoption(parser): - group = parser.addgroup("testing group") + group = parser.getgroup("testing group") group.addoption('-G', '--glong', action="store", default=42, type="int", dest="gdest", help="g value.") """) @@ -109,7 +109,7 @@ class TestConfigPickling: tmp.ensure("__init__.py") tmp.join("conftest.py").write(py.code.Source(""" def pytest_addoption(parser): - group = parser.addgroup("testing group") + group = parser.getgroup("testing group") 
group.addoption('-G', '--glong', action="store", default=42, type="int", dest="gdest", help="g value.") """)) --- a/_py/test/plugin/pytest_resultlog.py +++ b/_py/test/plugin/pytest_resultlog.py @@ -6,7 +6,7 @@ import py from py.builtin import print_ def pytest_addoption(parser): - group = parser.addgroup("resultlog", "resultlog plugin options") + group = parser.getgroup("resultlog", "resultlog plugin options") group.addoption('--resultlog', action="store", dest="resultlog", metavar="path", default=None, help="path for machine-readable result log.") From commits-noreply at bitbucket.org Tue Oct 27 16:23:42 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 15:23:42 +0000 (UTC) Subject: [py-svn] py-virtualenv commit b8da724503b5: fix "py.cleanup -d" - add test and check to only remove empty dirs (!) Message-ID: <20091027152342.535E77EF48@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-virtualenv # URL http://bitbucket.org/RonnyPfannschmidt/py-virtualenv/overview/ # User holger krekel # Date 1256655794 -3600 # Node ID b8da724503b52dffac09cc6fa30ab6a71c6f92a2 # Parent 0c477e1fc2ffc220438af98fcac4c464c50a0bd7 fix "py.cleanup -d" - add test and check to only remove empty dirs (!) --- a/_py/cmdline/pycleanup.py +++ b/_py/cmdline/pycleanup.py @@ -35,7 +35,8 @@ def main(): if options.removedir: for x in path.visit(lambda x: x.check(dir=1), lambda x: x.check(dotfile=0, link=0)): - remove(x, options) + if not x.listdir(): + remove(x, options) def remove(path, options): if options.dryrun: --- a/testing/cmdline/test_cmdline.py +++ b/testing/cmdline/test_cmdline.py @@ -38,12 +38,14 @@ class TestPyCleanup: result = testdir.runpybin("py.cleanup", tmpdir) assert not pyc.check() - def test_dir_remove(self, testdir, tmpdir): - p = tmpdir.mkdir("a") - result = testdir.runpybin("py.cleanup", tmpdir) + def test_dir_remove_simple(self, testdir, tmpdir): + subdir = tmpdir.mkdir("subdir") + p = subdir.ensure("file") + result = testdir.runpybin("py.cleanup", "-d", tmpdir) assert result.ret == 0 - assert p.check() + assert subdir.check() + p.remove() + p = tmpdir.mkdir("hello") result = testdir.runpybin("py.cleanup", tmpdir, '-d') assert result.ret == 0 - assert not p.check() - + assert not subdir.check() From commits-noreply at bitbucket.org Tue Oct 27 16:23:42 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 15:23:42 +0000 (UTC) Subject: [py-svn] py-virtualenv commit 0c477e1fc2ff: fix bug: a false xfail expression would erranonously report XPASS on failures Message-ID: <20091027152342.438637EF46@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-virtualenv # URL http://bitbucket.org/RonnyPfannschmidt/py-virtualenv/overview/ # User holger krekel # Date 1256641360 -3600 # Node ID 0c477e1fc2ffc220438af98fcac4c464c50a0bd7 # Parent f9ae279ff5aadd4369ede09806742edd6b2cc10e fix bug: a false xfail expression would erranonously report XPASS on failures --- a/_py/test/plugin/pytest_skipping.py +++ b/_py/test/plugin/pytest_skipping.py @@ -120,18 +120,20 @@ def pytest_runtest_setup(item): def pytest_runtest_makereport(__multicall__, item, call): if call.when != "call": return - if hasattr(item, 'obj'): - expr, result = evalexpression(item, 'xfail') - if result: - rep = __multicall__.execute() - if call.excinfo: - rep.skipped = True - rep.failed = rep.passed = False - else: - rep.skipped = rep.passed = False - rep.failed = True - rep.keywords['xfail'] = True # expr - return rep + expr, result 
= evalexpression(item, 'xfail') + rep = __multicall__.execute() + if result: + if call.excinfo: + rep.skipped = True + rep.failed = rep.passed = False + else: + rep.skipped = rep.passed = False + rep.failed = True + rep.keywords['xfail'] = expr + else: + if 'xfail' in rep.keywords: + del rep.keywords['xfail'] + return rep # called by terminalreporter progress reporting def pytest_report_teststatus(report): --- a/testing/pytest/plugin/test_pytest_skipping.py +++ b/testing/pytest/plugin/test_pytest_skipping.py @@ -59,6 +59,20 @@ def test_xfail_at_module(testdir): ]) assert result.ret == 0 +def test_xfail_evalfalse_but_fails(testdir): + p = testdir.makepyfile(""" + import py + @py.test.mark.xfail('False') + def test_fail(): + assert 0 + """) + result = testdir.runpytest(p, '--report=xfailed') + extra = result.stdout.fnmatch_lines([ + "*test_xfail_evalfalse_but_fails*:4*", + "*1 failed*" + ]) + assert result.ret == 1 + def test_skipif_decorator(testdir): p = testdir.makepyfile(""" import py From commits-noreply at bitbucket.org Tue Oct 27 16:52:30 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 15:52:30 +0000 (UTC) Subject: [py-svn] py-trunk commit eb41af2b8634: nosetest plugin now supports fallback to module level setup Message-ID: <20091027155230.1AA467EEE3@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User Ronny Pfannschmidt # Date 1256303513 -7200 # Node ID eb41af2b863412a2d9d0e73d42d4f13407ef120e # Parent 57132bbe20c575c34de83f196f1006cceaa594b8 nosetest plugin now supports fallback to module level setup --- a/_py/test/plugin/pytest_nose.py +++ b/_py/test/plugin/pytest_nose.py @@ -66,7 +66,9 @@ def pytest_runtest_setup(item): if isinstance(gen.parent, py.test.collect.Instance): call_optional(gen.parent.obj, 'setup') gen._nosegensetup = True - call_optional(item.obj, 'setup') + if not call_optional(item.obj, 'setup'): + # call module level setup if there is no object level one + call_optional(item.parent.obj, 'setup') def pytest_runtest_teardown(item): if isinstance(item, py.test.collect.Function): @@ -83,3 +85,6 @@ def call_optional(obj, name): method = getattr(obj, name, None) if method: method() + return True + else: + return False --- a/testing/pytest/plugin/test_pytest_nose.py +++ b/testing/pytest/plugin/test_pytest_nose.py @@ -85,3 +85,17 @@ def test_nose_test_generator_fixtures(te ]) + +def test_module_level_setup(testdir): + testdir.makepyfile(""" + items = {} + def setup(): + items[1]=1 + + def test_setup_changed_stuff(): + assert items + """) + result = testdir.runpytest('-p', 'nose') + result.stdout.fnmatch_lines([ + "*1 passed*", + ]) From commits-noreply at bitbucket.org Tue Oct 27 16:52:29 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 15:52:29 +0000 (UTC) Subject: [py-svn] py-trunk commit 726d84814940: merged ronny's nose-compatibility hacks, i.e. nosestyle Message-ID: <20091027155229.C82F17EE87@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256658578 -3600 # Node ID 726d84814940d464c3fc22f0e8624f9368035695 # Parent b8da724503b52dffac09cc6fa30ab6a71c6f92a2 # Parent af41413664fbbfdf816ae4a8a8e383283d5c0977 merged ronny's nose-compatibility hacks, i.e. nosestyle setup_module() and setup() functions are supported. 
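As a rough illustration of the xfail reporting change in this series (the test name is invented), a test whose xfail condition evaluates to false is now reported as an ordinary failure::

    import py

    @py.test.mark.xfail('False')    # condition evaluates to a false value
    def test_fails_for_real():
        # reported as a plain failure, not as an (incorrect)
        # expected-failure / XPASS entry
        assert 0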
added a few notes to changelog and documentation about it --- a/_py/test/plugin/pytest_nose.py +++ b/_py/test/plugin/pytest_nose.py @@ -1,7 +1,7 @@ """nose-compatibility plugin: allow to run nose test suites natively. This is an experimental plugin for allowing to run tests written -in 'nosetests' style with py.test. +in 'nosetests style with py.test. Usage ------------- @@ -10,26 +10,32 @@ type:: py.test # instead of 'nosetests' -and you should be able to run nose style tests. You will of course -get py.test style reporting and its feature set. +and you should be able to run nose style tests and at the same +time can make full use of py.test's capabilities. -Issues? ----------------- +Supported nose Idioms +---------------------- -If you find issues or have suggestions please run:: +* setup and teardown at module/class/method level +* SkipTest exceptions and markers +* setup/teardown decorators +* yield-based tests and their setup +* general usage of nose utilities - py.test --pastebin=all - -and send the resulting URL to a some contact channel. - -Known issues ------------------- +Unsupported idioms / issues +---------------------------------- - nose-style doctests are not collected and executed correctly, also fixtures don't work. - no nose-configuration is recognized +If you find other issues or have suggestions please run:: + + py.test --pastebin=all + +and send the resulting URL to a py.test contact channel, +at best to the mailing list. """ import py import inspect @@ -66,11 +72,14 @@ def pytest_runtest_setup(item): if isinstance(gen.parent, py.test.collect.Instance): call_optional(gen.parent.obj, 'setup') gen._nosegensetup = True - call_optional(item.obj, 'setup') + if not call_optional(item.obj, 'setup'): + # call module level setup if there is no object level one + call_optional(item.parent.obj, 'setup') def pytest_runtest_teardown(item): if isinstance(item, py.test.collect.Function): - call_optional(item.obj, 'teardown') + if not call_optional(item.obj, 'teardown'): + call_optional(item.parent.obj, 'teardown') #if hasattr(item.parent, '_nosegensetup'): # #call_optional(item._nosegensetup, 'teardown') # del item.parent._nosegensetup @@ -82,4 +91,9 @@ def pytest_make_collect_report(collector def call_optional(obj, name): method = getattr(obj, name, None) if method: - method() + argspec = inspect.getargspec(method) + if argspec[0] == ['self']: + argspec = argspec[1:] + if not any(argspec): + method() + return True --- a/bin-for-dist/makepluginlist.py +++ b/bin-for-dist/makepluginlist.py @@ -1,6 +1,5 @@ -import py -import sys +import os, sys WIDTH = 75 plugins = [ @@ -269,6 +268,9 @@ class PluginDoc(RestWriter): self.Print(opt.help, indent=4) if __name__ == "__main__": + if os.path.exists("py"): + sys.path.insert(0, os.getcwd()) + import py _config = py.test.config _config.parse([]) _config.pluginmanager.do_configure(_config) --- a/doc/changelog.txt +++ b/doc/changelog.txt @@ -1,6 +1,10 @@ Changes between 1.0.2 and '1.1.0b1' ===================================== +* merged Ronny's nose-compatibility hacks: now + nose-style setup_module() and setup() functions are + supported + * introduce generalized py.test.mark function marking * reshuffle / refine command line grouping --- a/doc/test/plugin/nose.txt +++ b/doc/test/plugin/nose.txt @@ -8,7 +8,7 @@ nose-compatibility plugin: allow to run :local: This is an experimental plugin for allowing to run tests written -in 'nosetests' style with py.test. +in 'nosetests style with py.test. 
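As a rough sketch of what that means in practice, a plain nose-style module along these lines (adapted from the tests in this series) is collected and run by ``py.test`` without modification::

    items = {}

    def setup():            # argument-free, nose-style module setup
        items[1] = 1

    def teardown():         # runs after this module's tests finished
        items.clear()

    def test_module_setup_ran():
        assert items[1] == 1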
Usage ------------- @@ -17,25 +17,32 @@ type:: py.test # instead of 'nosetests' -and you should be able to run nose style tests. You will of course -get py.test style reporting and its feature set. +and you should be able to run nose style tests and at the same +time can make full use of py.test's capabilities. -Issues? ----------------- +Supported nose Idioms +---------------------- -If you find issues or have suggestions please run:: +* setup and teardown at module/class/method level +* SkipTest exceptions and markers +* setup/teardown decorators +* yield-based tests and their setup +* general usage of nose utilities - py.test --pastebin=all - -and send the resulting URL to a some contact channel. - -Known issues ------------------- +Unsupported idioms / issues +---------------------------------- - nose-style doctests are not collected and executed correctly, also fixtures don't work. -- no nose-configuration is recognized +- no nose-configuration is recognized + +If you find other issues or have suggestions please run:: + + py.test --pastebin=all + +and send the resulting URL to a py.test contact channel, +at best to the mailing list. Start improving this plugin in 30 seconds ========================================= From commits-noreply at bitbucket.org Tue Oct 27 16:52:31 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 15:52:31 +0000 (UTC) Subject: [py-svn] py-trunk commit 61a03e7e7224: better tests for the nose plugin, support module level teardown Message-ID: <20091027155231.C14177EEE9@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User Ronny Pfannschmidt # Date 1256304479 -7200 # Node ID 61a03e7e72248ee42f29cbceea1aad052c62362b # Parent eb41af2b863412a2d9d0e73d42d4f13407ef120e better tests for the nose plugin, support module level teardown --- a/_py/test/plugin/pytest_nose.py +++ b/_py/test/plugin/pytest_nose.py @@ -72,7 +72,8 @@ def pytest_runtest_setup(item): def pytest_runtest_teardown(item): if isinstance(item, py.test.collect.Function): - call_optional(item.obj, 'teardown') + if not call_optional(item.obj, 'teardown'): + call_optional(item.parent.obj, 'teardown') #if hasattr(item.parent, '_nosegensetup'): # #call_optional(item._nosegensetup, 'teardown') # del item.parent._nosegensetup --- a/testing/pytest/plugin/test_pytest_nose.py +++ b/testing/pytest/plugin/test_pytest_nose.py @@ -88,14 +88,30 @@ def test_nose_test_generator_fixtures(te def test_module_level_setup(testdir): testdir.makepyfile(""" + from nose.tools import with_setup items = {} def setup(): items[1]=1 - def test_setup_changed_stuff(): - assert items + def teardown(): + del items[1] + + def setup2(): + items[2] = 2 + + def teardown2(): + del items[2] + + def test_setup_module_setup(): + assert items[1] == 1 + + @with_setup(setup2, teardown2) + def test_local_setup(): + assert items[2] == 2 + assert 1 not in items + """) result = testdir.runpytest('-p', 'nose') result.stdout.fnmatch_lines([ - "*1 passed*", + "*2 passed*", ]) From commits-noreply at bitbucket.org Tue Oct 27 16:52:33 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 15:52:33 +0000 (UTC) Subject: [py-svn] py-trunk commit 988b3400b47e: nose plugin wont call setup functions that arent made for it Message-ID: <20091027155233.EC2797EEEB@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User Ronny 
Pfannschmidt # Date 1256307388 -7200 # Node ID 988b3400b47e55cd1fb5e2b3afaa4462831cf51f # Parent 61a03e7e72248ee42f29cbceea1aad052c62362b nose plugin wont call setup functions that arent made for it --- a/_py/test/plugin/pytest_nose.py +++ b/_py/test/plugin/pytest_nose.py @@ -85,7 +85,9 @@ def pytest_make_collect_report(collector def call_optional(obj, name): method = getattr(obj, name, None) if method: - method() - return True - else: - return False + argspec = inspect.getargspec(method) + if argspec[0] == ['self']: + argspec = argspec[1:] + if not any(argspec): + method() + return True From commits-noreply at bitbucket.org Tue Oct 27 16:52:34 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 15:52:34 +0000 (UTC) Subject: [py-svn] py-trunk commit af41413664fb: support nose style argument-free setup/teardown functions Message-ID: <20091027155234.060A67EEF5@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User Ronny Pfannschmidt # Date 1256307426 -7200 # Node ID af41413664fbbfdf816ae4a8a8e383283d5c0977 # Parent 988b3400b47e55cd1fb5e2b3afaa4462831cf51f support nose style argument-free setup/teardown functions --- a/_py/test/pycollect.py +++ b/_py/test/pycollect.py @@ -161,13 +161,24 @@ class Module(py.test.collect.File, PyCol def setup(self): if getattr(self.obj, 'disabled', 0): py.test.skip("%r is disabled" %(self.obj,)) - mod = self.obj - if hasattr(mod, 'setup_module'): - self.obj.setup_module(mod) + if hasattr(self.obj, 'setup_module'): + #XXX: nose compat hack, move to nose plugin + # if it takes a positional arg, its probably a py.test style one + # so we pass the current module object + if inspect.getargspec(self.obj.setup_module)[0]: + self.obj.setup_module(self.obj) + else: + self.obj.setup_module() def teardown(self): if hasattr(self.obj, 'teardown_module'): - self.obj.teardown_module(self.obj) + #XXX: nose compat hack, move to nose plugin + # if it takes a positional arg, its probably a py.test style one + # so we pass the current module object + if inspect.getargspec(self.obj.teardown_module)[0]: + self.obj.teardown_module(self.obj) + else: + self.obj.teardown_module() class Class(PyCollectorMixin, py.test.collect.Collector): --- a/testing/pytest/plugin/test_pytest_nose.py +++ b/testing/pytest/plugin/test_pytest_nose.py @@ -115,3 +115,24 @@ def test_module_level_setup(testdir): result.stdout.fnmatch_lines([ "*2 passed*", ]) + +def test_nose_style_setup_teardown(testdir): + testdir.makepyfile(""" + l = [] + def setup_module(): + l.append(1) + + def teardown_module(): + del l[0] + + def test_hello(): + assert l == [1] + + def test_world(): + assert l == [1] + """) + result = testdir.runpytest('-p', 'nose') + result.stdout.fnmatch_lines([ + "*2 passed*", + ]) + From commits-noreply at bitbucket.org Tue Oct 27 21:04:45 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 20:04:45 +0000 (UTC) Subject: [py-svn] apipkg commit 526d7d388cd8: support for single level relative imports and more unittests Message-ID: <20091027200445.26BCB7EEF0@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User Ronny Pfannschmidt # Date 1256048014 -7200 # Node ID 526d7d388cd8f09d42067412594923983e4f3e0b # Parent 78f6f28253457f50ac06a429cde0d88234c0e0a8 support for single level relative imports and more unittests --- a/test_apipkg.py +++ b/test_apipkg.py @@ -1,7 
+1,7 @@ import types import sys import py - +import apipkg # # test support for importing modules # @@ -126,3 +126,38 @@ def parsenamespace(spec): cur[apinames[-1]] = spec return ns + +def test_relative_import(): + import email + api_email = apipkg.ApiModule('email', {'Message': '.message:Message'}) + assert api_email.Message is email.message.Message + +def test_absolute_import(): + import email + api_email = apipkg.ApiModule('email', {'Message':'email.message:Message'}) + assert api_email.Message is email.message.Message + +def test_nested_absolute_imports(): + import email + api_email = apipkg.ApiModule('email',{ + 'message2': { + 'Message': 'email.message:Message', + }, + }) + # nesting is supposed to replace things in sys.modules + assert 'email.message2' in sys.modules + + +def test_initpkg_no_replace(monkeypatch): + api = apipkg.ApiModule('email_no_replace', {}) + monkeypatch.setitem(sys.modules, 'email_no_replace', api) + apipkg.initpkg('email_no_replace', {}) + assert sys.modules['email_no_replace'] is api + + at py.test.mark.xfail +def test_initpkg_do_replace(monkeypatch): + api = apipkg.ApiModule('email_replace', {}) + monkeypatch.setitem(sys.modules, 'email_replace', api) + apipkg.initpkg('email_replace', {}, replace=True) + assert sys.modules['email_replace'] is not api + --- a/apipkg.py +++ b/apipkg.py @@ -45,6 +45,8 @@ class ApiModule(ModuleType): if name == '__doc__': self.__doc__ = importobj(importspec) else: + if importspec[0] == '.': + importspec = fullname + importspec self.__map__[name] = importspec def __repr__(self): From commits-noreply at bitbucket.org Tue Oct 27 21:04:47 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 20:04:47 +0000 (UTC) Subject: [py-svn] apipkg commit 9fee4ebc2b03: initpkg now replaces the package in sys.modules Message-ID: <20091027200447.DD7C97EF18@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User Ronny Pfannschmidt # Date 1256298349 -7200 # Node ID 9fee4ebc2b037921b0bff990052ddae0d7265060 # Parent 526d7d388cd8f09d42067412594923983e4f3e0b initpkg now replaces the package in sys.modules importing a package which initializes itself with apipkg will result in a ApiModule instead of a module --- a/test_apipkg.py +++ b/test_apipkg.py @@ -144,20 +144,13 @@ def test_nested_absolute_imports(): 'Message': 'email.message:Message', }, }) - # nesting is supposed to replace things in sys.modules + # nesting is supposed to put nested items into sys.modules assert 'email.message2' in sys.modules - -def test_initpkg_no_replace(monkeypatch): - api = apipkg.ApiModule('email_no_replace', {}) - monkeypatch.setitem(sys.modules, 'email_no_replace', api) - apipkg.initpkg('email_no_replace', {}) - assert sys.modules['email_no_replace'] is api - - at py.test.mark.xfail def test_initpkg_do_replace(monkeypatch): api = apipkg.ApiModule('email_replace', {}) monkeypatch.setitem(sys.modules, 'email_replace', api) + # initpkg will also replace in sys.modules apipkg.initpkg('email_replace', {}, replace=True) assert sys.modules['email_replace'] is not api --- a/apipkg.py +++ b/apipkg.py @@ -10,13 +10,12 @@ from types import ModuleType __version__ = "1.0b1" -def initpkg(pkgname, exportdefs): - """ initialize given package from the export definitions. """ - pkgmodule = sys.modules[pkgname] +def initpkg(pkgname, exportdefs, replace=False): + """ initialize given package from the export definitions. 
+ replace it in sys.modules + """ mod = ApiModule(pkgname, exportdefs) - for name, value in mod.__dict__.items(): - if name[:2] != "__" or name == "__all__": - setattr(pkgmodule, name, value) + sys.modules[pkgname] = mod def importobj(importspec): """ return object specified by importspec.""" From commits-noreply at bitbucket.org Tue Oct 27 21:04:51 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 20:04:51 +0000 (UTC) Subject: [py-svn] apipkg commit 78ac2d06cf0b: remove the __fullname__ attribute, __name__ is now the expected doted name Message-ID: <20091027200451.2D6CA7EF1D@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User Ronny Pfannschmidt # Date 1256300292 -7200 # Node ID 78ac2d06cf0b8e9dc445f8c72dd637fa643a59b5 # Parent 9fee4ebc2b037921b0bff990052ddae0d7265060 remove the __fullname__ attribute, __name__ is now the expected doted name --- a/test_apipkg.py +++ b/test_apipkg.py @@ -154,3 +154,9 @@ def test_initpkg_do_replace(monkeypatch) apipkg.initpkg('email_replace', {}, replace=True) assert sys.modules['email_replace'] is not api +def test_name_attribute(): + api = apipkg.ApiModule('name_test', { + 'subpkg': {}, + }) + assert api.__name__ == 'name_test' + assert api.subpkg.__name__ == 'name_test.subpkg' --- a/apipkg.py +++ b/apipkg.py @@ -28,16 +28,12 @@ class ApiModule(ModuleType): self.__name__ = name self.__all__ = list(importspec) self.__map__ = {} - if parent: - fullname = parent.__fullname__ + "." + name - setattr(parent, name, self) - else: - fullname = name - self.__fullname__ = fullname for name, importspec in importspec.items(): if isinstance(importspec, dict): - apimod = ApiModule(name, importspec, parent=self) - sys.modules[apimod.__fullname__] = apimod + package = '%s.%s'%(self.__name__, name) + apimod = ApiModule(package, importspec, parent=self) + sys.modules[package] = apimod + setattr(self, name, apimod) else: if not importspec.count(":") == 1: raise ValueError("invalid importspec %r" % (importspec,)) @@ -45,11 +41,11 @@ class ApiModule(ModuleType): self.__doc__ = importobj(importspec) else: if importspec[0] == '.': - importspec = fullname + importspec + importspec = self.__name__ + importspec self.__map__[name] = importspec def __repr__(self): - return '' % (self.__fullname__,) + return '' % (self.__name__,) def __getattr__(self, name): try: From commits-noreply at bitbucket.org Tue Oct 27 21:04:54 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 20:04:54 +0000 (UTC) Subject: [py-svn] apipkg commit d53b2f585f67: handle __file__, __version__ and __path__ generically Message-ID: <20091027200454.905867EF29@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User holger krekel # Date 1256673797 -3600 # Node ID d53b2f585f67404894285671874827e0232a26bb # Parent 78ac2d06cf0b8e9dc445f8c72dd637fa643a59b5 handle __file__, __version__ and __path__ generically fix trove license id, starting to write better tests --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ def main(): classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', - 'License :: OSI Approved :: GNU General Public License (GPL)', + 'License :: OSI Approved :: MIT License', 'Operating System :: POSIX', 'Operating System :: Microsoft :: Windows', 'Operating System :: MacOS :: MacOS X', --- a/apipkg.py +++ b/apipkg.py @@ -5,16 +5,20 @@ see 
http://pypi.python.org/pypi/apipkg (c) holger krekel, 2009 - MIT license """ -import os, sys +import sys from types import ModuleType -__version__ = "1.0b1" +__version__ = "1.0b2" -def initpkg(pkgname, exportdefs, replace=False): +def initpkg(pkgname, exportdefs): """ initialize given package from the export definitions. replace it in sys.modules """ mod = ApiModule(pkgname, exportdefs) + oldmod = sys.modules[pkgname] + mod.__file__ = getattr(oldmod, '__file__', None) + mod.__version__ = getattr(oldmod, '__version__', None) + mod.__path__ = getattr(oldmod, '__path__', None) sys.modules[pkgname] = mod def importobj(importspec): --- a/test_apipkg.py +++ b/test_apipkg.py @@ -84,6 +84,31 @@ class TestRealModule: print (realtest.x.module.__map__) assert realtest.x.module.__doc__ == 'test module' +class TestScenarios: + def test_relative_import(self, monkeypatch, tmpdir): + pkgdir = tmpdir.mkdir("mymodule") + pkgdir.join('__init__.py').write(py.code.Source(""" + import apipkg + apipkg.initpkg(__name__, { + 'x': '.submod:x' + }) + """)) + pkgdir.join('submod.py').write("x=3\n") + monkeypatch.syspath_prepend(tmpdir) + import mymodule + assert isinstance(mymodule, apipkg.ApiModule) + assert mymodule.x == 3 + +def xtest_nested_absolute_imports(): + import email + api_email = apipkg.ApiModule('email',{ + 'message2': { + 'Message': 'email.message:Message', + }, + }) + # nesting is supposed to put nested items into sys.modules + assert 'email.message2' in sys.modules + # alternate ideas for specifying package + preliminary code # def test_parsenamespace(): @@ -93,7 +118,7 @@ def test_parsenamespace(): test.raises __.test.outcome::raises """ d = parsenamespace(spec) - print d + print (d) assert d == {'test': {'raises': '__.test.outcome::raises'}, 'path': {'svnwc': '__.path.svnwc::WCCommandPath', 'local': '__.path.local::LocalPath'} @@ -126,33 +151,32 @@ def parsenamespace(spec): cur[apinames[-1]] = spec return ns +def test_initpkg_replaces_sysmodules(monkeypatch): + mod = type(sys)('hello') + monkeypatch.setitem(sys.modules, 'hello', mod) + apipkg.initpkg('hello', {'x': 'os.path:abspath'}) + newmod = sys.modules['hello'] + assert newmod != mod + assert newmod.x == py.std.os.path.abspath -def test_relative_import(): - import email - api_email = apipkg.ApiModule('email', {'Message': '.message:Message'}) - assert api_email.Message is email.message.Message +def test_initpkg_transfers_version_and_file(monkeypatch): + mod = type(sys)('hello') + mod.__version__ = 10 + mod.__file__ = "hello.py" + monkeypatch.setitem(sys.modules, 'hello', mod) + apipkg.initpkg('hello', {}) + newmod = sys.modules['hello'] + assert newmod != mod + assert newmod.__file__ == mod.__file__ + assert newmod.__version__ == mod.__version__ -def test_absolute_import(): - import email - api_email = apipkg.ApiModule('email', {'Message':'email.message:Message'}) - assert api_email.Message is email.message.Message - -def test_nested_absolute_imports(): - import email - api_email = apipkg.ApiModule('email',{ - 'message2': { - 'Message': 'email.message:Message', - }, - }) - # nesting is supposed to put nested items into sys.modules - assert 'email.message2' in sys.modules - -def test_initpkg_do_replace(monkeypatch): - api = apipkg.ApiModule('email_replace', {}) - monkeypatch.setitem(sys.modules, 'email_replace', api) - # initpkg will also replace in sys.modules - apipkg.initpkg('email_replace', {}, replace=True) - assert sys.modules['email_replace'] is not api +def test_initpkg_defaults(monkeypatch): + mod = type(sys)('hello') + 
monkeypatch.setitem(sys.modules, 'hello', mod) + apipkg.initpkg('hello', {}) + newmod = sys.modules['hello'] + assert newmod.__file__ == None + assert newmod.__version__ == None def test_name_attribute(): api = apipkg.ApiModule('name_test', { From commits-noreply at bitbucket.org Tue Oct 27 21:38:34 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 20:38:34 +0000 (UTC) Subject: [py-svn] py-trunk commit 68c5479b8f72: first round of fixing jython compatibility issues, marking some tests as xfail-on-jython Message-ID: <20091027203834.575E47EF5C@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256675651 -3600 # Node ID 68c5479b8f72860ee9f5117388ab0471c019ab1c # Parent eac97f2e317418956e509252f7d6adf6a0bed792 first round of fixing jython compatibility issues, marking some tests as xfail-on-jython --- a/testing/pytest/plugin/test_pytest_capture.py +++ b/testing/pytest/plugin/test_pytest_capture.py @@ -1,12 +1,14 @@ import py, os, sys from _py.test.plugin.pytest_capture import CaptureManager +needsosdup = py.test.mark.xfail("not hasattr(os, 'dup')") + class TestCaptureManager: def test_getmethod_default_no_fd(self, testdir, monkeypatch): config = testdir.parseconfig(testdir.tmpdir) assert config.getvalue("capture") is None capman = CaptureManager() - monkeypatch.delattr(os, 'dup') + monkeypatch.delattr(os, 'dup', raising=False) try: assert capman._getmethod(config, None) == "sys" finally: @@ -16,14 +18,21 @@ class TestCaptureManager: config = testdir.parseconfig(testdir.tmpdir) assert config.getvalue("capture") is None capman = CaptureManager() - assert capman._getmethod(config, None) == "fd" # default + hasfd = hasattr(os, 'dup') + if hasfd: + assert capman._getmethod(config, None) == "fd" + else: + assert capman._getmethod(config, None) == "sys" for name in ('no', 'fd', 'sys'): + if not hasfd and name == 'fd': + continue sub = testdir.tmpdir.mkdir("dir" + name) sub.ensure("__init__.py") sub.join("conftest.py").write('option_capture = %r' % name) assert capman._getmethod(config, sub.join("test_hello.py")) == name + @needsosdup @py.test.mark.multi(method=['no', 'fd', 'sys']) def test_capturing_basic_api(self, method): capouter = py.io.StdCaptureFD() @@ -43,6 +52,7 @@ class TestCaptureManager: finally: capouter.reset() + @needsosdup def test_juggle_capturings(self, testdir): capouter = py.io.StdCaptureFD() try: @@ -242,10 +252,13 @@ class TestLoggingInteraction: # here we check a fundamental feature rootdir = str(py.path.local(py.__file__).dirpath().dirpath()) p = testdir.makepyfile(""" - import sys + import sys, os sys.path.insert(0, %r) import py, logging - cap = py.io.StdCaptureFD(out=False, in_=False) + if hasattr(os, 'dup'): + cap = py.io.StdCaptureFD(out=False, in_=False) + else: + cap = py.io.StdCapture(out=False, in_=False) logging.warn("hello1") outerr = cap.suspend() @@ -328,7 +341,8 @@ class TestCaptureFuncarg: assert out.startswith("42") """) reprec.assertoutcome(passed=1) - + + @needsosdup def test_stdfd_functional(self, testdir): reprec = testdir.inline_runsource(""" def test_hello(capfd): @@ -351,6 +365,7 @@ class TestCaptureFuncarg: "*1 error*", ]) + @needsosdup def test_keyboardinterrupt_disables_capturing(self, testdir): p = testdir.makepyfile(""" def test_hello(capfd): --- a/testing/path/common.py +++ b/testing/path/common.py @@ -132,6 +132,7 @@ class CommonFSTests(object): assert not l1.relto(l2) assert not l2.relto(l1) + 
@py.test.mark.xfail("sys.platform.startswith('java')") def test_listdir(self, path1): l = path1.listdir() assert path1.join('sampledir') in l @@ -177,6 +178,7 @@ class CommonFSTests(object): assert "sampledir" in l assert "otherdir" in l + @py.test.mark.xfail("sys.platform.startswith('java')") def test_visit_ignore(self, path1): p = path1.join('nonexisting') assert list(p.visit(ignore=py.error.ENOENT)) == [] --- a/_py/test/plugin/pytest_assertion.py +++ b/_py/test/plugin/pytest_assertion.py @@ -8,6 +8,9 @@ def pytest_addoption(parser): help="disable python assert expression reinterpretation."), def pytest_configure(config): + if sys.platform.startswith("java"): + return # XXX assertions don't work yet with jython 2.5.1 + if not config.getvalue("noassert") and not config.getvalue("nomagic"): warn_about_missing_assertion() config._oldassertion = py.builtin.builtins.AssertionError --- a/testing/pytest/test_collect.py +++ b/testing/pytest/test_collect.py @@ -189,7 +189,7 @@ class TestPrunetraceback: assert "__import__" not in result.stdout.str(), "too long traceback" result.stdout.fnmatch_lines([ "*ERROR during collection*", - ">*import not_exists*" + "*mport*not_exists*" ]) class TestCustomConftests: --- a/_py/code/code.py +++ b/_py/code/code.py @@ -28,6 +28,9 @@ class Code(object): if rec-cursive is true then dive into code objects contained in co_consts. """ + if sys.platform.startswith("java"): + # XXX jython does not support the below co_filename hack + return self.raw names = [x for x in dir(self.raw) if x[:3] == 'co_'] for name in kwargs: if name not in names: --- a/testing/path/test_local.py +++ b/testing/path/test_local.py @@ -3,6 +3,8 @@ import sys from py.path import local from testing.path import common +failsonjython = py.test.mark.xfail("sys.platform.startswith('java')") + def pytest_funcarg__path1(request): def setup(): path1 = request.config.mktemp("path1") @@ -545,11 +547,13 @@ class TestPOSIXLocalPath: for x,y in oldmodes.items(): x.chmod(y) + @failsonjython def test_chown_identity(self, path1): owner = path1.stat().owner group = path1.stat().group path1.chown(owner, group) + @failsonjython def test_chown_dangling_link(self, path1): owner = path1.stat().owner group = path1.stat().group @@ -560,6 +564,7 @@ class TestPOSIXLocalPath: finally: x.remove(rec=0) + @failsonjython def test_chown_identity_rec_mayfail(self, path1): owner = path1.stat().owner group = path1.stat().group --- a/testing/code/test_code.py +++ b/testing/code/test_code.py @@ -3,6 +3,8 @@ import py import sys from _py.code.code import safe_repr +failsonjython = py.test.mark.xfail("sys.platform.startswith('java')") + def test_newcode(): source = "i = 3" co = compile(source, '', 'exec') @@ -16,10 +18,12 @@ def test_ne(): code2 = py.code.Code(compile('foo = "baz"', '', 'exec')) assert code2 != code1 + at failsonjython def test_newcode_unknown_args(): code = py.code.Code(compile("", '', 'exec')) py.test.raises(TypeError, 'code.new(filename="hello")') + at failsonjython def test_newcode_withfilename(): source = py.code.Source(""" def f(): @@ -44,6 +48,7 @@ def test_newcode_withfilename(): assert 'f' in names assert 'g' in names + at failsonjython def test_newcode_with_filename(): source = "i = 3" co = compile(source, '', 'exec') @@ -58,6 +63,7 @@ def test_newcode_with_filename(): assert str(s) == source + at failsonjython def test_new_code_object_carries_filename_through(): class mystr(str): pass --- a/_py/code/assertion.py +++ b/_py/code/assertion.py @@ -37,7 +37,7 @@ def _format_explanation(explanation): return 
'\n'.join(result) -if sys.version_info >= (2, 6): +if sys.version_info >= (2, 6) or (sys.platform.startswith("java")): from _py.code._assertionnew import interpret else: from _py.code._assertionold import interpret --- a/_py/log/warning.py +++ b/_py/log/warning.py @@ -50,6 +50,8 @@ def warn(msg, stacklevel=1, function=Non fnl = filename.lower() if fnl.endswith(".pyc") or fnl.endswith(".pyo"): filename = filename[:-1] + elif fnl.endswith("$py.class"): + filename = filename.replace('$py.class', '.py') else: if module == "__main__": try: --- a/testing/code/test_assertion.py +++ b/testing/code/test_assertion.py @@ -1,5 +1,7 @@ import py +pytestmark = py.test.mark.skipif("sys.platform.startswith('java')") + def exvalue(): return py.std.sys.exc_info()[1] --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -384,7 +384,7 @@ raise ValueError() def test_repr_local(self): p = FormattedExcinfo(showlocals=True) - loc = {'y': 5, 'z': 7, 'x': 3, '__builtins__': __builtins__} + loc = {'y': 5, 'z': 7, 'x': 3, '__builtins__': {}} # __builtins__} reprlocals = p.repr_locals(loc) assert reprlocals.lines assert reprlocals.lines[0] == '__builtins__ = ' From commits-noreply at bitbucket.org Tue Oct 27 21:38:32 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 20:38:32 +0000 (UTC) Subject: [py-svn] py-trunk commit eac97f2e3174: using apipkg 1.0b2 snapshot version - adjusting/cleaning up some impl-detail accesses Message-ID: <20091027203832.A88FB7EF52@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256675502 -3600 # Node ID eac97f2e317418956e509252f7d6adf6a0bed792 # Parent 726d84814940d464c3fc22f0e8624f9368035695 using apipkg 1.0b2 snapshot version - adjusting/cleaning up some impl-detail accesses --- a/_py/test/plugin/pytest_restdoc.py +++ b/_py/test/plugin/pytest_restdoc.py @@ -175,7 +175,7 @@ class ReSTSyntaxTest(py.test.collect.Ite 'to the py package') % (text,) relpath = '/'.join(text.split('/')[1:]) if check: - pkgroot = py.path.local(py._py.__file__).dirpath() + pkgroot = py._impldir abspath = pkgroot.join(relpath) assert pkgroot.join(relpath).check(), ( 'problem with linkrole :source:`%s`: ' --- a/py/__init__.py +++ b/py/__init__.py @@ -26,6 +26,9 @@ _py.apipkg.initpkg(__name__, dict( # access to all posix errno's as classes error = '_py.error:error', + _impldir = '_py._metainfo:impldir', + version = 'py:__version__', # backward compatibility + _com = { 'Registry': '_py._com:Registry', 'MultiCall': '_py._com:MultiCall', --- /dev/null +++ b/_py/_metainfo.py @@ -0,0 +1,5 @@ + +import py +import _py + +impldir = py.path.local(_py.__file__).dirpath() --- a/_py/apipkg.py +++ b/_py/apipkg.py @@ -5,18 +5,21 @@ see http://pypi.python.org/pypi/apipkg (c) holger krekel, 2009 - MIT license """ -import os, sys +import sys from types import ModuleType -__version__ = "1.0b1" +__version__ = "1.0b2" def initpkg(pkgname, exportdefs): - """ initialize given package from the export definitions. """ - pkgmodule = sys.modules[pkgname] + """ initialize given package from the export definitions. 
+ replace it in sys.modules + """ mod = ApiModule(pkgname, exportdefs) - for name, value in mod.__dict__.items(): - if name[:2] != "__" or name == "__all__": - setattr(pkgmodule, name, value) + oldmod = sys.modules[pkgname] + mod.__file__ = getattr(oldmod, '__file__', None) + mod.__version__ = getattr(oldmod, '__version__', None) + mod.__path__ = getattr(oldmod, '__path__', None) + sys.modules[pkgname] = mod def importobj(importspec): """ return object specified by importspec.""" @@ -29,26 +32,24 @@ class ApiModule(ModuleType): self.__name__ = name self.__all__ = list(importspec) self.__map__ = {} - if parent: - fullname = parent.__fullname__ + "." + name - setattr(parent, name, self) - else: - fullname = name - self.__fullname__ = fullname for name, importspec in importspec.items(): if isinstance(importspec, dict): - apimod = ApiModule(name, importspec, parent=self) - sys.modules[apimod.__fullname__] = apimod + package = '%s.%s'%(self.__name__, name) + apimod = ApiModule(package, importspec, parent=self) + sys.modules[package] = apimod + setattr(self, name, apimod) else: if not importspec.count(":") == 1: raise ValueError("invalid importspec %r" % (importspec,)) if name == '__doc__': self.__doc__ = importobj(importspec) else: + if importspec[0] == '.': + importspec = self.__name__ + importspec self.__map__[name] = importspec def __repr__(self): - return '' % (self.__fullname__,) + return '' % (self.__name__,) def __getattr__(self, name): try: --- a/testing/pytest/plugin/conftest.py +++ b/testing/pytest/plugin/conftest.py @@ -1,7 +1,7 @@ import py pytest_plugins = "pytester" -plugindir = py.path.local(py._py.__file__).dirpath('test', 'plugin') +plugindir = py._impldir.join('test', 'plugin') from _py.test.defaultconftest import pytest_plugins as default_plugins def pytest_collect_file(path, parent): --- a/testing/test_py_imports.py +++ b/testing/test_py_imports.py @@ -7,6 +7,7 @@ def checksubpackage(name): if hasattr(obj, '__map__'): # isinstance(obj, Module): keys = dir(obj) assert len(keys) > 0 + print (obj.__map__) assert getattr(obj, '__map__') == {} def test_dir(): --- a/_py/test/pycollect.py +++ b/_py/test/pycollect.py @@ -19,7 +19,6 @@ a tree of collectors and test items that import py import inspect from _py.test.collect import configproperty, warnoldcollect -pydir = py.path.local(py._py.__file__).dirpath() from _py.test import funcargs class PyobjMixin(object): @@ -258,7 +257,7 @@ class FunctionMixin(PyobjMixin): if ntraceback == traceback: ntraceback = ntraceback.cut(path=path) if ntraceback == traceback: - ntraceback = ntraceback.cut(excludepath=pydir) + ntraceback = ntraceback.cut(excludepath=py._impldir) traceback = ntraceback.filter() return traceback --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -112,7 +112,7 @@ class TestTraceback_f_g_h: def test_traceback_cut_excludepath(self, testdir): p = testdir.makepyfile("def f(): raise ValueError") excinfo = py.test.raises(ValueError, "p.pyimport().f()") - basedir = py.path.local(py._py.__file__).dirpath() + basedir = py._impldir newtraceback = excinfo.traceback.cut(excludepath=basedir) assert len(newtraceback) == 1 assert newtraceback[0].frame.code.path == p --- a/_py/test/collect.py +++ b/_py/test/collect.py @@ -4,7 +4,6 @@ Collectors and test Items form a tree that is usually built iteratively. 
""" import py -pydir = py.path.local(py._py.__file__).dirpath() def configproperty(name): def fget(self): @@ -336,7 +335,7 @@ class Collector(Node): path = self.fspath ntraceback = traceback.cut(path=self.fspath) if ntraceback == traceback: - ntraceback = ntraceback.cut(excludepath=pydir) + ntraceback = ntraceback.cut(excludepath=py._impldir) traceback = ntraceback.filter() return traceback --- a/_py/test/config.py +++ b/_py/test/config.py @@ -261,8 +261,8 @@ class Config(object): conftestroots = config.getconftest_pathlist("rsyncdirs") if conftestroots: roots.extend(conftestroots) - pydirs = [py.path.local(x).dirpath() - for x in (py.__file__, py._py.__file__)] + pydirs = [py.path.local(py.__file__).dirpath(), + py._impldir] roots = [py.path.local(root) for root in roots] for root in roots: if not root.check(): From commits-noreply at bitbucket.org Tue Oct 27 21:51:19 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 20:51:19 +0000 (UTC) Subject: [py-svn] py-trunk commit acf745993e90: enabling assertions with jython, fixing one .format occurence Message-ID: <20091027205119.2717F7EF5F@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256676665 -3600 # Node ID acf745993e90a77c326d8a9d41a054dc8b2a61cd # Parent 68c5479b8f72860ee9f5117388ab0471c019ab1c enabling assertions with jython, fixing one .format occurence to provide the setting for http://paste.pocoo.org/show/147361/ --- a/_py/code/_assertionnew.py +++ b/_py/code/_assertionnew.py @@ -109,7 +109,7 @@ class DebugInterpreter(ast.NodeVisitor): raise Failure() return None, None else: - raise AssertionError("can't handle {0}".format(node)) + raise AssertionError("can't handle %s" %(node,)) def _compile(self, source, mode="eval"): return compile(source, "", mode) --- a/_py/test/plugin/pytest_assertion.py +++ b/_py/test/plugin/pytest_assertion.py @@ -8,8 +8,8 @@ def pytest_addoption(parser): help="disable python assert expression reinterpretation."), def pytest_configure(config): - if sys.platform.startswith("java"): - return # XXX assertions don't work yet with jython 2.5.1 + #if sys.platform.startswith("java"): + # return # XXX assertions don't work yet with jython 2.5.1 if not config.getvalue("noassert") and not config.getvalue("nomagic"): warn_about_missing_assertion() --- a/testing/code/test_assertion.py +++ b/testing/code/test_assertion.py @@ -1,6 +1,6 @@ import py -pytestmark = py.test.mark.skipif("sys.platform.startswith('java')") +#pytestmark = py.test.mark.skipif("sys.platform.startswith('java')") def exvalue(): return py.std.sys.exc_info()[1] From commits-noreply at bitbucket.org Tue Oct 27 22:24:23 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 21:24:23 +0000 (UTC) Subject: [py-svn] py-trunk commit 4c1168157b73: hack around Jython's incorrect AST heriachy Message-ID: <20091027212423.017787EF66@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User Benjamin Peterson # Date 1256678566 18000 # Node ID 4c1168157b73f09eebe441f9011d217cf2b7c79b # Parent 68c5479b8f72860ee9f5117388ab0471c019ab1c hack around Jython's incorrect AST heriachy --- a/_py/code/_assertionnew.py +++ b/_py/code/_assertionnew.py @@ -10,6 +10,29 @@ import py from _py.code.assertion import _format_explanation, BuiltinAssertionError +if sys.platform.startswith("java") and sys.version_info 
< (2, 5, 2): + # See http://bugs.jython.org/issue1497 + _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", + "ListComp", "GeneratorExp", "Yield", "Compare", "Call", + "Repr", "Num", "Str", "Attribute", "Subscript", "Name", + "List", "Tuple") + _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign", + "AugAssign", "Print", "For", "While", "If", "With", "Raise", + "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom", + "Exec", "Global", "Expr", "Pass", "Break", "Continue") + _expr_nodes = set(getattr(ast, name) for name in _exprs) + _stmt_nodes = set(getattr(ast, name) for name in _stmts) + def _is_ast_expr(node): + return node.__class__ in _expr_nodes + def _is_ast_stmt(node): + return node.__class__ in _stmt_nodes +else: + def _is_ast_expr(node): + return isinstance(node, ast.expr) + def _is_ast_stmt(node): + return isinstance(node, ast.stmt) + + class Failure(Exception): """Error found while interpreting AST.""" @@ -91,7 +114,7 @@ class DebugInterpreter(ast.NodeVisitor): def generic_visit(self, node): # Fallback when we don't have a special implementation. - if isinstance(node, ast.expr): + if _is_ast_expr(node): mod = ast.Expression(node) co = self._compile(mod) try: @@ -100,7 +123,7 @@ class DebugInterpreter(ast.NodeVisitor): raise Failure() explanation = self.frame.repr(result) return explanation, result - elif isinstance(node, ast.stmt): + elif _is_ast_stmt(node): mod = ast.Module([node]) co = self._compile(mod, "exec") try: From commits-noreply at bitbucket.org Tue Oct 27 22:24:24 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 21:24:24 +0000 (UTC) Subject: [py-svn] py-trunk commit f74d7c1b3e47: merge trunk Message-ID: <20091027212424.B34CD7EF69@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User Benjamin Peterson # Date 1256678619 18000 # Node ID f74d7c1b3e478a4affff5923048402c1c8ded50c # Parent 4c1168157b73f09eebe441f9011d217cf2b7c79b # Parent acf745993e90a77c326d8a9d41a054dc8b2a61cd merge trunk --- a/_py/code/_assertionnew.py +++ b/_py/code/_assertionnew.py @@ -132,7 +132,7 @@ class DebugInterpreter(ast.NodeVisitor): raise Failure() return None, None else: - raise AssertionError("can't handle {0}".format(node)) + raise AssertionError("can't handle %s" %(node,)) def _compile(self, source, mode="eval"): return compile(source, "", mode) From commits-noreply at bitbucket.org Tue Oct 27 22:44:15 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 27 Oct 2009 21:44:15 +0000 (UTC) Subject: [py-svn] py-trunk commit 9f7669e79f88: can't use .format() on jython :( Message-ID: <20091027214415.219B47EF6A@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User Benjamin Peterson # Date 1256679951 18000 # Node ID 9f7669e79f883c65463b4e029e87a1604a74564a # Parent f74d7c1b3e478a4affff5923048402c1c8ded50c can't use .format() on jython :( --- a/_py/code/_assertionnew.py +++ b/_py/code/_assertionnew.py @@ -66,9 +66,9 @@ def getfailure(failure): lines = explanation.splitlines() if not lines: lines.append("") - lines[0] += " << {0}".format(value) + lines[0] += " << %s" % (value,) explanation = "\n".join(lines) - text = "{0}: {1}".format(failure.cause[0].__name__, explanation) + text = "%s: %s" % (failure.cause[0].__name__, explanation) if text.startswith("AssertionError: assert "): text = text[16:] return text @@ -99,10 +99,10 
@@ operator_map = { } unary_map = { - ast.Not : "not {0}", - ast.Invert : "~{0}", - ast.USub : "-{0}", - ast.UAdd : "+{0}" + ast.Not : "not %s", + ast.Invert : "~%s", + ast.USub : "-%s", + ast.UAdd : "+%s" } @@ -147,7 +147,7 @@ class DebugInterpreter(ast.NodeVisitor): def visit_Name(self, name): explanation, result = self.generic_visit(name) # See if the name is local. - source = "{0!r} in locals() is not globals()".format(name.id) + source = "%r in locals() is not globals()" % (name.id,) co = self._compile(source) try: local = self.frame.eval(co) @@ -167,9 +167,9 @@ class DebugInterpreter(ast.NodeVisitor): break next_explanation, next_result = self.visit(next_op) op_symbol = operator_map[op.__class__] - explanation = "{0} {1} {2}".format(left_explanation, op_symbol, - next_explanation) - source = "__exprinfo_left {0} __exprinfo_right".format(op_symbol) + explanation = "%s %s %s" % (left_explanation, op_symbol, + next_explanation) + source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,) co = self._compile(source) try: result = self.frame.eval(co, __exprinfo_left=left_result, @@ -196,8 +196,8 @@ class DebugInterpreter(ast.NodeVisitor): def visit_UnaryOp(self, unary): pattern = unary_map[unary.op.__class__] operand_explanation, operand_result = self.visit(unary.operand) - explanation = pattern.format(operand_explanation) - co = self._compile(pattern.format("__exprinfo_expr")) + explanation = pattern % (operand_explanation,) + co = self._compile(pattern % ("__exprinfo_expr",)) try: result = self.frame.eval(co, __exprinfo_expr=operand_result) except Exception: @@ -208,9 +208,9 @@ class DebugInterpreter(ast.NodeVisitor): left_explanation, left_result = self.visit(binop.left) right_explanation, right_result = self.visit(binop.right) symbol = operator_map[binop.op.__class__] - explanation = "({0} {1} {2})".format(left_explanation, symbol, - right_explanation) - source = "__exprinfo_left {0} __exprinfo_right".format(symbol) + explanation = "(%s %s %s)" % (left_explanation, symbol, + right_explanation) + source = "__exprinfo_left %s __exprinfo_right" % (symbol,) co = self._compile(source) try: result = self.frame.eval(co, __exprinfo_left=left_result, @@ -226,33 +226,33 @@ class DebugInterpreter(ast.NodeVisitor): arguments = [] for arg in call.args: arg_explanation, arg_result = self.visit(arg) - arg_name = "__exprinfo_{0}".format(len(ns)) + arg_name = "__exprinfo_%s" % (len(ns),) ns[arg_name] = arg_result arguments.append(arg_name) arg_explanations.append(arg_explanation) for keyword in call.keywords: arg_explanation, arg_result = self.visit(keyword.value) - arg_name = "__exprinfo_{0}".format(len(ns)) + arg_name = "__exprinfo_%s" % (len(ns),) ns[arg_name] = arg_result - keyword_source = "{0}={{0}}".format(keyword.id) - arguments.append(keyword_source.format(arg_name)) - arg_explanations.append(keyword_source.format(arg_explanation)) + keyword_source = "%s=%%s" % (keyword.id) + arguments.append(keyword_source % (arg_name,)) + arg_explanations.append(keyword_source % (arg_explanation,)) if call.starargs: arg_explanation, arg_result = self.visit(call.starargs) arg_name = "__exprinfo_star" ns[arg_name] = arg_result - arguments.append("*{0}".format(arg_name)) - arg_explanations.append("*{0}".format(arg_explanation)) + arguments.append("*%s" % (arg_name,)) + arg_explanations.append("*%s" % (arg_explanation,)) if call.kwargs: arg_explanation, arg_result = self.visit(call.kwargs) arg_name = "__exprinfo_kwds" ns[arg_name] = arg_result - arguments.append("**{0}".format(arg_name)) - 
arg_explanations.append("**{0}".format(arg_explanation)) + arguments.append("**%s" % (arg_name,)) + arg_explanations.append("**%s" % (arg_explanation,)) args_explained = ", ".join(arg_explanations) - explanation = "{0}({1})".format(func_explanation, args_explained) + explanation = "%s(%s)" % (func_explanation, args_explained) args = ", ".join(arguments) - source = "__exprinfo_func({0})".format(args) + source = "__exprinfo_func(%s)" % (args,) co = self._compile(source) try: result = self.frame.eval(co, **ns) @@ -269,14 +269,14 @@ class DebugInterpreter(ast.NodeVisitor): except Exception: is_bool = False if not is_bool: - pattern = "{0}\n{{{0} = {1}\n}}" + pattern = "%s\n{%s = %s\n}" rep = self.frame.repr(result) - explanation = pattern.format(rep, explanation) + explanation = pattern % (rep, rep, explanation) return explanation, result def _is_builtin_name(self, name): - pattern = "{0!r} not in globals() and {0!r} not in locals()" - source = pattern.format(name.id) + pattern = "%r not in globals() and %r not in locals()" + source = pattern % (name.id, name.id) co = self._compile(source) try: return self.frame.eval(co) @@ -287,16 +287,16 @@ class DebugInterpreter(ast.NodeVisitor): if not isinstance(attr.ctx, ast.Load): return self.generic_visit(attr) source_explanation, source_result = self.visit(attr.value) - explanation = "{0}.{1}".format(source_explanation, attr.attr) - source = "__exprinfo_expr.{0}".format(attr.attr) + explanation = "%s.%s" % (source_explanation, attr.attr) + source = "__exprinfo_expr.%s" % (attr.attr,) co = self._compile(source) try: result = self.frame.eval(co, __exprinfo_expr=source_result) except Exception: raise Failure(explanation) # Check if the attr is from an instance. - source = "{0!r} in getattr(__exprinfo_expr, '__dict__', {{}})" - source = source.format(attr.attr) + source = "%r in getattr(__exprinfo_expr, '__dict__', {})" + source = source % (attr.attr,) co = self._compile(source) try: from_instance = self.frame.eval(co, __exprinfo_expr=source_result) @@ -304,8 +304,8 @@ class DebugInterpreter(ast.NodeVisitor): from_instance = True if from_instance: rep = self.frame.repr(result) - pattern = "{0}\n{{{0} = {1}\n}}" - explanation = pattern.format(rep, explanation) + pattern = "%s\n{%s = %s\n}" + explanation = pattern % (rep, rep, explanation) return explanation, result def visit_Assert(self, assrt): @@ -313,7 +313,7 @@ class DebugInterpreter(ast.NodeVisitor): if test_explanation.startswith("False\n{False =") and \ test_explanation.endswith("\n"): test_explanation = test_explanation[15:-2] - explanation = "assert {0}".format(test_explanation) + explanation = "assert %s" % (test_explanation,) if not test_result: try: raise BuiltinAssertionError @@ -323,7 +323,7 @@ class DebugInterpreter(ast.NodeVisitor): def visit_Assign(self, assign): value_explanation, value_result = self.visit(assign.value) - explanation = "... = {0}".format(value_explanation) + explanation = "... 
= %s" % (value_explanation,) name = ast.Name("__exprinfo_expr", ast.Load(), assign.value.lineno, assign.value.col_offset) new_assign = ast.Assign(assign.targets, name, assign.lineno, From commits-noreply at bitbucket.org Wed Oct 28 19:51:48 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Wed, 28 Oct 2009 18:51:48 +0000 (UTC) Subject: [py-svn] py-trunk commit 78979cd0cdec: resolves issue #59 Message-ID: <20091028185148.A36BF7EF62@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256755880 -3600 # Node ID 78979cd0cdecadd5d10eda31f755eb950bf551f3 # Parent 9f7669e79f883c65463b4e029e87a1604a74564a resolves issue #59 resolves issue #48 Have the path.pyimport() helper raise an EnvironmentError if an import of a given file returns a module that does not appear to be coming from the actual path. E.g. for a directory layout like this: a / test_whatever.py b / test_whatever.py calling py.path.local("b/test_whatever.py").pyimport() will fail if the other globally scoped test_whatever module was loaded already. --- a/testing/pytest/test_pycollect.py +++ b/testing/pytest/test_pycollect.py @@ -16,6 +16,18 @@ class TestModule: py.test.raises(ImportError, modcol.collect) py.test.raises(ImportError, modcol.run) + def test_import_duplicate(self, testdir): + a = testdir.mkdir("a") + b = testdir.mkdir("b") + p = a.ensure("test_whatever.py") + p.pyimport() + del py.std.sys.modules['test_whatever'] + b.ensure("test_whatever.py") + result = testdir.runpytest() + s = result.stdout.str() + assert 'mismatch' in s + assert 'test_whatever' in s + def test_syntax_error_in_module(self, testdir): modcol = testdir.getmodulecol("this is a syntax error") py.test.raises(SyntaxError, modcol.collect) --- a/_py/test/plugin/pytest_pytester.py +++ b/_py/test/plugin/pytest_pytester.py @@ -70,6 +70,12 @@ class TmpTestdir: py.std.sys.path.remove(p) if hasattr(self, '_olddir'): self._olddir.chdir() + # delete modules that have been loaded from tmpdir + for name, mod in list(sys.modules.items()): + if mod: + fn = getattr(mod, '__file__', None) + if fn and fn.startswith(str(self.tmpdir)): + del sys.modules[name] def getreportrecorder(self, obj): if isinstance(obj, py._com.Registry): --- a/doc/changelog.txt +++ b/doc/changelog.txt @@ -1,6 +1,11 @@ Changes between 1.0.2 and '1.1.0b1' ===================================== +* fix issue48 and issue59: raise an Error if the module + from an imported test file does not seem to come from + the filepath - avoids "same-name" confusion that has + been reported repeatedly + * merged Ronny's nose-compatibility hacks: now nose-style setup_module() and setup() functions are supported --- a/testing/path/test_local.py +++ b/testing/path/test_local.py @@ -336,6 +336,27 @@ class TestImport: from xxxpackage import module1 assert module1 is mod1 + def test_pyimport_check_filepath_consistency(self, monkeypatch, tmpdir): + name = 'pointsback123' + ModuleType = type(py.std.sys) + p = tmpdir.ensure(name + '.py') + for ending in ('.pyc', '$py.class', '.pyo'): + mod = ModuleType(name) + pseudopath = tmpdir.ensure(name+ending) + mod.__file__ = str(pseudopath) + monkeypatch.setitem(sys.modules, name, mod) + newmod = p.pyimport() + assert mod == newmod + monkeypatch.undo() + mod = ModuleType(name) + pseudopath = tmpdir.ensure(name+"123.py") + mod.__file__ = str(pseudopath) + monkeypatch.setitem(sys.modules, name, mod) + excinfo = py.test.raises(EnvironmentError, "p.pyimport()") 
+ s = str(excinfo.value) + assert "mismatch" in s + assert name+"123" in s + def test_pypkgdir(tmpdir): pkg = tmpdir.ensure('pkg1', dir=1) pkg.ensure("__init__.py") --- a/_py/path/local.py +++ b/_py/path/local.py @@ -516,7 +516,16 @@ class LocalPath(FSBase): self._prependsyspath(self.dirpath()) modname = self.purebasename mod = __import__(modname, None, None, ['__doc__']) - #self._module = mod + modfile = mod.__file__ + if modfile[-4:] in ('.pyc', '.pyo'): + modfile = modfile[:-1] + elif modfile.endswith('$py.class'): + modfile = modfile[:-9] + '.py' + if not self.samefile(modfile): + raise EnvironmentError("mismatch:\n" + "imported module %r\n" + "does not stem from %r\n" + "maybe __init__.py files are missing?" % (mod, str(self))) return mod else: try: From commits-noreply at bitbucket.org Wed Oct 28 20:39:57 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Wed, 28 Oct 2009 19:39:57 +0000 (UTC) Subject: [py-svn] py-trunk commit 8aafd94741a0: fix three python3 issues Message-ID: <20091028193957.372E67EF64@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256758784 -3600 # Node ID 8aafd94741a023b19993ab4098efb0991a161a5b # Parent 78979cd0cdecadd5d10eda31f755eb950bf551f3 fix three python3 issues --- a/_py/test/plugin/pytest_mark.py +++ b/_py/test/plugin/pytest_mark.py @@ -129,7 +129,9 @@ def pytest_pycollect_makeitem(__multical if isinstance(item, py.test.collect.Function): cls = collector.getparent(py.test.collect.Class) mod = collector.getparent(py.test.collect.Module) - func = getattr(item.obj, 'im_func', item.obj) + func = item.obj + func = getattr(func, '__func__', func) # py3 + func = getattr(func, 'im_func', func) # py2 for parent in [x for x in (mod, cls) if x]: marker = getattr(parent.obj, 'pytestmark', None) if isinstance(marker, MarkerDecorator): --- a/_py/test/plugin/pytest_hooklog.py +++ b/_py/test/plugin/pytest_hooklog.py @@ -29,3 +29,5 @@ def pytest_unconfigure(config): del config.hook.__dict__['_performcall'] except KeyError: pass + else: + config._hooklogfile.close() --- a/_py/test/plugin/pytest_terminal.py +++ b/_py/test/plugin/pytest_terminal.py @@ -272,7 +272,7 @@ class TerminalReporter: items = self.config.pluginmanager._name2plugin.items() for name, plugin in items: repr_plugin = repr(plugin) - fullwidth = getattr(self._tw, 'fullwidth', sys.maxint) + fullwidth = getattr(self._tw, 'fullwidth', 65000) if len(repr_plugin)+26 > fullwidth: repr_plugin = repr_plugin[:(fullwidth-30)] + '...' self.write_line(" %-20s: %s" %(name, repr_plugin)) From commits-noreply at bitbucket.org Wed Oct 28 22:01:25 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Wed, 28 Oct 2009 21:01:25 +0000 (UTC) Subject: [py-svn] py-trunk commit d47e03af77c2: fix a test-import issue occuring when there is a second 'testing' directory in PYTHONPATH or so. Message-ID: <20091028210125.61CFE7EF68@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256762006 -3600 # Node ID d47e03af77c208ac5957482a8fb06dbcc933ae90 # Parent 8aafd94741a023b19993ab4098efb0991a161a5b fix a test-import issue occuring when there is a second 'testing' directory in PYTHONPATH or so. 
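The patch below builds on the same idea as the previous commit (78979cd0cdec): when two test files share a module name, a bare import can silently hand back the module that was loaded first from the other directory. The snippet that follows is a minimal, self-contained sketch of that consistency check, written for illustration only; the helper name import_checked and its error text are made up here, and the real logic lives in py.path.local.pyimport() as shown in the patch above, while this commit additionally makes the remote side import the intended file explicitly.

import os
import sys

def import_checked(filepath):
    """Import `filepath` by its basename and fail loudly on a name clash.

    Hypothetical helper (not part of the py lib API) mirroring the
    __file__-consistency check added to pyimport().
    """
    dirname, filename = os.path.split(os.path.abspath(filepath))
    modname = os.path.splitext(filename)[0]
    if dirname not in sys.path:
        sys.path.insert(0, dirname)
    mod = __import__(modname)
    modfile = getattr(mod, '__file__', '')
    # normalize compiled-file suffixes the same way the patch above does
    if modfile[-4:] in ('.pyc', '.pyo'):
        modfile = modfile[:-1]
    elif modfile.endswith('$py.class'):   # jython compiled classes
        modfile = modfile[:-9] + '.py'
    if os.path.realpath(modfile) != os.path.realpath(filepath):
        raise EnvironmentError(
            "mismatch: imported %r from %r, expected %r "
            "(is another module with the same name on sys.path?)"
            % (modname, modfile, filepath))
    return mod

For example, with a layout of a/test_whatever.py and b/test_whatever.py, calling import_checked('b/test_whatever.py') after the copy in a/ has already been imported raises the mismatch error instead of silently running the wrong file.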
--- a/testing/pytest/dist/test_mypickle.py +++ b/testing/pytest/dist/test_mypickle.py @@ -120,6 +120,11 @@ class TestPickleChannelFunctional: def setup_class(cls): cls.gw = execnet.PopenGateway() cls.gw.remote_init_threads(5) + # we need the remote test code to import + # the same test module here + cls.gw.remote_exec( + "import py ; py.path.local(%r).pyimport()" %(__file__) + ) def test_popen_send_instance(self): channel = self.gw.remote_exec(""" From commits-noreply at bitbucket.org Wed Oct 28 22:01:25 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Wed, 28 Oct 2009 21:01:25 +0000 (UTC) Subject: [py-svn] py-trunk commit e8d9ef867c3f: remove unnecessary builtin directory in favour of a single file Message-ID: <20091028210125.9D6957EF6A@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256763638 -3600 # Node ID e8d9ef867c3f2a4bdf1dd8934f15ed99da8b7fba # Parent d47e03af77c208ac5957482a8fb06dbcc933ae90 remove unnecessary builtin directory in favour of a single file --- a/_py/builtin/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -""" backports and additions of builtins """ - --- a/py/__init__.py +++ b/py/__init__.py @@ -121,26 +121,26 @@ _py.apipkg.initpkg(__name__, dict( # backports and additions of builtins builtin = { '__doc__' : '_py.builtin:__doc__', - 'enumerate' : '_py.builtin.builtin24:enumerate', - 'reversed' : '_py.builtin.builtin24:reversed', - 'sorted' : '_py.builtin.builtin24:sorted', - 'set' : '_py.builtin.builtin24:set', - 'frozenset' : '_py.builtin.builtin24:frozenset', - 'BaseException' : '_py.builtin.builtin25:BaseException', - 'GeneratorExit' : '_py.builtin.builtin25:GeneratorExit', - 'print_' : '_py.builtin.builtin31:print_', - '_reraise' : '_py.builtin.builtin31:_reraise', - '_tryimport' : '_py.builtin.builtin31:_tryimport', - 'exec_' : '_py.builtin.builtin31:exec_', - '_basestring' : '_py.builtin.builtin31:_basestring', - '_totext' : '_py.builtin.builtin31:_totext', - '_isbytes' : '_py.builtin.builtin31:_isbytes', - '_istext' : '_py.builtin.builtin31:_istext', - '_getimself' : '_py.builtin.builtin31:_getimself', - '_getfuncdict' : '_py.builtin.builtin31:_getfuncdict', - 'builtins' : '_py.builtin.builtin31:builtins', - 'execfile' : '_py.builtin.builtin31:execfile', - 'callable' : '_py.builtin.builtin31:callable', + 'enumerate' : '_py.builtin:enumerate', + 'reversed' : '_py.builtin:reversed', + 'sorted' : '_py.builtin:sorted', + 'set' : '_py.builtin:set', + 'frozenset' : '_py.builtin:frozenset', + 'BaseException' : '_py.builtin:BaseException', + 'GeneratorExit' : '_py.builtin:GeneratorExit', + 'print_' : '_py.builtin:print_', + '_reraise' : '_py.builtin:_reraise', + '_tryimport' : '_py.builtin:_tryimport', + 'exec_' : '_py.builtin:exec_', + '_basestring' : '_py.builtin:_basestring', + '_totext' : '_py.builtin:_totext', + '_isbytes' : '_py.builtin:_isbytes', + '_istext' : '_py.builtin:_istext', + '_getimself' : '_py.builtin:_getimself', + '_getfuncdict' : '_py.builtin:_getfuncdict', + 'builtins' : '_py.builtin:builtins', + 'execfile' : '_py.builtin:execfile', + 'callable' : '_py.builtin:callable', }, # input-output helping --- a/_py/builtin/builtin25.py +++ /dev/null @@ -1,15 +0,0 @@ - -try: - BaseException = BaseException -except NameError: - BaseException = Exception - -try: - GeneratorExit = GeneratorExit -except NameError: - class GeneratorExit(Exception): - """ This exception is never raised, it is there to make it possible to - write 
code compatible with CPython 2.5 even in lower CPython - versions.""" - pass - GeneratorExit.__module__ = 'exceptions' --- a/_py/builtin/builtin31.py +++ /dev/null @@ -1,117 +0,0 @@ -import py -import sys - -if sys.version_info >= (3, 0): - exec ("print_ = print ; exec_=exec") - import builtins - - # some backward compatibility helpers - _basestring = str - def _totext(obj, encoding): - if isinstance(obj, bytes): - obj = obj.decode(encoding) - elif not isinstance(obj, str): - obj = str(obj) - return obj - - def _isbytes(x): - return isinstance(x, bytes) - def _istext(x): - return isinstance(x, str) - - def _getimself(function): - return getattr(function, '__self__', None) - - def _getfuncdict(function): - return getattr(function, "__dict__", None) - - def execfile(fn, globs=None, locs=None): - if globs is None: - back = sys._getframe(1) - globs = back.f_globals - locs = back.f_locals - del back - elif locs is None: - locs = globs - fp = open(fn, "rb") - try: - source = fp.read() - finally: - fp.close() - co = compile(source, fn, "exec", dont_inherit=True) - exec_(co, globs, locs) - - def callable(obj): - return hasattr(obj, "__call__") - -else: - import __builtin__ as builtins - _totext = unicode - _basestring = basestring - execfile = execfile - callable = callable - def _isbytes(x): - return isinstance(x, str) - def _istext(x): - return isinstance(x, unicode) - - def _getimself(function): - return getattr(function, 'im_self', None) - - def _getfuncdict(function): - return getattr(function, "__dict__", None) - - def print_(*args, **kwargs): - """ minimal backport of py3k print statement. """ - sep = ' ' - if 'sep' in kwargs: - sep = kwargs.pop('sep') - end = '\n' - if 'end' in kwargs: - end = kwargs.pop('end') - file = 'file' in kwargs and kwargs.pop('file') or sys.stdout - if kwargs: - args = ", ".join([str(x) for x in kwargs]) - raise TypeError("invalid keyword arguments: %s" % args) - at_start = True - for x in args: - if not at_start: - file.write(sep) - file.write(str(x)) - at_start = False - file.write(end) - - def exec_(obj, globals=None, locals=None): - """ minimal backport of py3k exec statement. """ - if globals is None: - frame = sys._getframe(1) - globals = frame.f_globals - if locals is None: - locals = frame.f_locals - elif locals is None: - locals = globals - exec2(obj, globals, locals) - -if sys.version_info >= (3,0): - exec (""" -def _reraise(cls, val, tb): - assert hasattr(val, '__traceback__') - raise val -""") -else: - exec (""" -def _reraise(cls, val, tb): - raise cls, val, tb -def exec2(obj, globals, locals): - exec obj in globals, locals -""") - -def _tryimport(*names): - """ return the first successfully imported module. 
""" - assert names - for name in names: - try: - return __import__(name, None, None, '__doc__') - except ImportError: - excinfo = sys.exc_info() - py.builtin._reraise(*excinfo) --- /dev/null +++ b/_py/builtin.py @@ -0,0 +1,203 @@ +import sys + +try: + reversed = reversed +except NameError: + def reversed(sequence): + """reversed(sequence) -> reverse iterator over values of the sequence + + Return a reverse iterator + """ + if hasattr(sequence, '__reversed__'): + return sequence.__reversed__() + if not hasattr(sequence, '__getitem__'): + raise TypeError("argument to reversed() must be a sequence") + return reversed_iterator(sequence) + + class reversed_iterator(object): + + def __init__(self, seq): + self.seq = seq + self.remaining = len(seq) + + def __iter__(self): + return self + + def next(self): + i = self.remaining + if i > 0: + i -= 1 + item = self.seq[i] + self.remaining = i + return item + raise StopIteration + + def __length_hint__(self): + return self.remaining + +try: + sorted = sorted +except NameError: + builtin_cmp = cmp # need to use cmp as keyword arg + + def sorted(iterable, cmp=None, key=None, reverse=0): + use_cmp = None + if key is not None: + if cmp is None: + def use_cmp(x, y): + return builtin_cmp(x[0], y[0]) + else: + def use_cmp(x, y): + return cmp(x[0], y[0]) + l = [(key(element), element) for element in iterable] + else: + if cmp is not None: + use_cmp = cmp + l = list(iterable) + if use_cmp is not None: + l.sort(use_cmp) + else: + l.sort() + if reverse: + l.reverse() + if key is not None: + return [element for (_, element) in l] + return l + +try: + set, frozenset = set, frozenset +except NameError: + from sets import set, frozenset + +# pass through +enumerate = enumerate + +try: + BaseException = BaseException +except NameError: + BaseException = Exception + +try: + GeneratorExit = GeneratorExit +except NameError: + class GeneratorExit(Exception): + """ This exception is never raised, it is there to make it possible to + write code compatible with CPython 2.5 even in lower CPython + versions.""" + pass + GeneratorExit.__module__ = 'exceptions' + +if sys.version_info >= (3, 0): + exec ("print_ = print ; exec_=exec") + import builtins + + # some backward compatibility helpers + _basestring = str + def _totext(obj, encoding): + if isinstance(obj, bytes): + obj = obj.decode(encoding) + elif not isinstance(obj, str): + obj = str(obj) + return obj + + def _isbytes(x): + return isinstance(x, bytes) + def _istext(x): + return isinstance(x, str) + + def _getimself(function): + return getattr(function, '__self__', None) + + def _getfuncdict(function): + return getattr(function, "__dict__", None) + + def execfile(fn, globs=None, locs=None): + if globs is None: + back = sys._getframe(1) + globs = back.f_globals + locs = back.f_locals + del back + elif locs is None: + locs = globs + fp = open(fn, "rb") + try: + source = fp.read() + finally: + fp.close() + co = compile(source, fn, "exec", dont_inherit=True) + exec_(co, globs, locs) + + def callable(obj): + return hasattr(obj, "__call__") + +else: + import __builtin__ as builtins + _totext = unicode + _basestring = basestring + execfile = execfile + callable = callable + def _isbytes(x): + return isinstance(x, str) + def _istext(x): + return isinstance(x, unicode) + + def _getimself(function): + return getattr(function, 'im_self', None) + + def _getfuncdict(function): + return getattr(function, "__dict__", None) + + def print_(*args, **kwargs): + """ minimal backport of py3k print statement. 
""" + sep = ' ' + if 'sep' in kwargs: + sep = kwargs.pop('sep') + end = '\n' + if 'end' in kwargs: + end = kwargs.pop('end') + file = 'file' in kwargs and kwargs.pop('file') or sys.stdout + if kwargs: + args = ", ".join([str(x) for x in kwargs]) + raise TypeError("invalid keyword arguments: %s" % args) + at_start = True + for x in args: + if not at_start: + file.write(sep) + file.write(str(x)) + at_start = False + file.write(end) + + def exec_(obj, globals=None, locals=None): + """ minimal backport of py3k exec statement. """ + if globals is None: + frame = sys._getframe(1) + globals = frame.f_globals + if locals is None: + locals = frame.f_locals + elif locals is None: + locals = globals + exec2(obj, globals, locals) + +if sys.version_info >= (3,0): + exec (""" +def _reraise(cls, val, tb): + assert hasattr(val, '__traceback__') + raise val +""") +else: + exec (""" +def _reraise(cls, val, tb): + raise cls, val, tb +def exec2(obj, globals, locals): + exec obj in globals, locals +""") + +def _tryimport(*names): + """ return the first successfully imported module. """ + assert names + for name in names: + try: + return __import__(name, None, None, '__doc__') + except ImportError: + excinfo = sys.exc_info() + _reraise(*excinfo) --- a/_py/builtin/builtin24.py +++ /dev/null @@ -1,71 +0,0 @@ -try: - reversed = reversed -except NameError: - def reversed(sequence): - """reversed(sequence) -> reverse iterator over values of the sequence - - Return a reverse iterator - """ - if hasattr(sequence, '__reversed__'): - return sequence.__reversed__() - if not hasattr(sequence, '__getitem__'): - raise TypeError("argument to reversed() must be a sequence") - return reversed_iterator(sequence) - - class reversed_iterator(object): - - def __init__(self, seq): - self.seq = seq - self.remaining = len(seq) - - def __iter__(self): - return self - - def next(self): - i = self.remaining - if i > 0: - i -= 1 - item = self.seq[i] - self.remaining = i - return item - raise StopIteration - - def __length_hint__(self): - return self.remaining - -try: - sorted = sorted -except NameError: - builtin_cmp = cmp # need to use cmp as keyword arg - - def sorted(iterable, cmp=None, key=None, reverse=0): - use_cmp = None - if key is not None: - if cmp is None: - def use_cmp(x, y): - return builtin_cmp(x[0], y[0]) - else: - def use_cmp(x, y): - return cmp(x[0], y[0]) - l = [(key(element), element) for element in iterable] - else: - if cmp is not None: - use_cmp = cmp - l = list(iterable) - if use_cmp is not None: - l.sort(use_cmp) - else: - l.sort() - if reverse: - l.reverse() - if key is not None: - return [element for (_, element) in l] - return l - -try: - set, frozenset = set, frozenset -except NameError: - from sets import set, frozenset - -# pass through -enumerate = enumerate From commits-noreply at bitbucket.org Thu Oct 29 11:31:59 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 29 Oct 2009 10:31:59 +0000 (UTC) Subject: [py-svn] apipkg commit d833d467838c: fix relative imports for __doc__ reading and for deeper hierarchies Message-ID: <20091029103159.23E3E7EFC5@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User holger krekel # Date 1256807193 -3600 # Node ID d833d467838c8aaf2fe503a05e9fa7248f92801f # Parent d53b2f585f67404894285671874827e0232a26bb fix relative imports for __doc__ reading and for deeper hierarchies --- a/test_apipkg.py +++ b/test_apipkg.py @@ -89,15 +89,21 @@ class TestScenarios: pkgdir = 
tmpdir.mkdir("mymodule") pkgdir.join('__init__.py').write(py.code.Source(""" import apipkg - apipkg.initpkg(__name__, { - 'x': '.submod:x' + apipkg.initpkg(__name__, exportdefs={ + '__doc__': '.submod:maindoc', + 'x': '.submod:x', + 'y': { + 'z': '.submod:x' + }, }) """)) - pkgdir.join('submod.py').write("x=3\n") + pkgdir.join('submod.py').write("x=3\nmaindoc='hello'") monkeypatch.syspath_prepend(tmpdir) import mymodule assert isinstance(mymodule, apipkg.ApiModule) assert mymodule.x == 3 + assert mymodule.__doc__ == 'hello' + assert mymodule.y.z == 3 def xtest_nested_absolute_imports(): import email --- a/apipkg.py +++ b/apipkg.py @@ -11,53 +11,49 @@ from types import ModuleType __version__ = "1.0b2" def initpkg(pkgname, exportdefs): - """ initialize given package from the export definitions. - replace it in sys.modules - """ - mod = ApiModule(pkgname, exportdefs) + """ initialize given package from the export definitions. """ + mod = ApiModule(pkgname, exportdefs, implprefix=pkgname) oldmod = sys.modules[pkgname] mod.__file__ = getattr(oldmod, '__file__', None) mod.__version__ = getattr(oldmod, '__version__', None) mod.__path__ = getattr(oldmod, '__path__', None) sys.modules[pkgname] = mod -def importobj(importspec): - """ return object specified by importspec.""" - modpath, attrname = importspec.split(":") +def importobj(modpath, attrname): module = __import__(modpath, None, None, ['__doc__']) return getattr(module, attrname) class ApiModule(ModuleType): - def __init__(self, name, importspec, parent=None): + def __init__(self, name, importspec, implprefix=None): self.__name__ = name self.__all__ = list(importspec) self.__map__ = {} + self.__implprefix__ = implprefix or name for name, importspec in importspec.items(): if isinstance(importspec, dict): - package = '%s.%s'%(self.__name__, name) - apimod = ApiModule(package, importspec, parent=self) - sys.modules[package] = apimod + subname = '%s.%s'%(self.__name__, name) + apimod = ApiModule(subname, importspec, implprefix) + sys.modules[subname] = apimod setattr(self, name, apimod) else: - if not importspec.count(":") == 1: - raise ValueError("invalid importspec %r" % (importspec,)) + modpath, attrname = importspec.split(':') + if modpath[0] == '.': + modpath = implprefix + modpath if name == '__doc__': - self.__doc__ = importobj(importspec) + self.__doc__ = importobj(modpath, attrname) else: - if importspec[0] == '.': - importspec = self.__name__ + importspec - self.__map__[name] = importspec + self.__map__[name] = (modpath, attrname) def __repr__(self): return '' % (self.__name__,) def __getattr__(self, name): try: - importspec = self.__map__.pop(name) + modpath, attrname = self.__map__.pop(name) except KeyError: raise AttributeError(name) else: - result = importobj(importspec) + result = importobj(modpath, attrname) setattr(self, name, result) return result From commits-noreply at bitbucket.org Thu Oct 29 11:32:00 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 29 Oct 2009 10:32:00 +0000 (UTC) Subject: [py-svn] apipkg commit 40bc1c4ff6c5: unhide import errors Message-ID: <20091029103200.DF89E7EFE0@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User holger krekel # Date 1256808699 -3600 # Node ID 40bc1c4ff6c5cbd8b64b6489551f7ca56e4ac11a # Parent d833d467838c8aaf2fe503a05e9fa7248f92801f unhide import errors --- a/test_apipkg.py +++ b/test_apipkg.py @@ -190,3 +190,21 @@ def test_name_attribute(): }) assert api.__name__ == 
'name_test' assert api.subpkg.__name__ == 'name_test.subpkg' + +def test_error_loading_one_element(monkeypatch, tmpdir): + pkgdir = tmpdir.mkdir("errorloading1") + pkgdir.join('__init__.py').write(py.code.Source(""" + import apipkg + apipkg.initpkg(__name__, exportdefs={ + 'x': '.notexists:x', + 'y': '.submod:y' + }, + ) + """)) + pkgdir.join('submod.py').write("y=0") + monkeypatch.syspath_prepend(tmpdir) + import errorloading1 + assert isinstance(errorloading1, apipkg.ApiModule) + assert errorloading1.y == 0 + py.test.raises(ImportError, 'errorloading1.x') + py.test.raises(ImportError, 'errorloading1.x') --- a/apipkg.py +++ b/apipkg.py @@ -49,12 +49,13 @@ class ApiModule(ModuleType): def __getattr__(self, name): try: - modpath, attrname = self.__map__.pop(name) + modpath, attrname = self.__map__[name] except KeyError: raise AttributeError(name) else: result = importobj(modpath, attrname) setattr(self, name, result) + del self.__map__[name] return result def __dict__(self): From commits-noreply at bitbucket.org Thu Oct 29 12:28:30 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 29 Oct 2009 11:28:30 +0000 (UTC) Subject: [py-svn] py-trunk commit e92da679b6f9: use new apipkg version Message-ID: <20091029112830.5C7CA7EFDE@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256813232 -3600 # Node ID e92da679b6f90839a45a02c3c9fae8f8af9de16c # Parent e8d9ef867c3f2a4bdf1dd8934f15ed99da8b7fba use new apipkg version --- a/_py/apipkg.py +++ b/_py/apipkg.py @@ -11,54 +11,51 @@ from types import ModuleType __version__ = "1.0b2" def initpkg(pkgname, exportdefs): - """ initialize given package from the export definitions. - replace it in sys.modules - """ - mod = ApiModule(pkgname, exportdefs) + """ initialize given package from the export definitions. 
""" + mod = ApiModule(pkgname, exportdefs, implprefix=pkgname) oldmod = sys.modules[pkgname] mod.__file__ = getattr(oldmod, '__file__', None) mod.__version__ = getattr(oldmod, '__version__', None) mod.__path__ = getattr(oldmod, '__path__', None) sys.modules[pkgname] = mod -def importobj(importspec): - """ return object specified by importspec.""" - modpath, attrname = importspec.split(":") +def importobj(modpath, attrname): module = __import__(modpath, None, None, ['__doc__']) return getattr(module, attrname) class ApiModule(ModuleType): - def __init__(self, name, importspec, parent=None): + def __init__(self, name, importspec, implprefix=None): self.__name__ = name self.__all__ = list(importspec) self.__map__ = {} + self.__implprefix__ = implprefix or name for name, importspec in importspec.items(): if isinstance(importspec, dict): - package = '%s.%s'%(self.__name__, name) - apimod = ApiModule(package, importspec, parent=self) - sys.modules[package] = apimod + subname = '%s.%s'%(self.__name__, name) + apimod = ApiModule(subname, importspec, implprefix) + sys.modules[subname] = apimod setattr(self, name, apimod) else: - if not importspec.count(":") == 1: - raise ValueError("invalid importspec %r" % (importspec,)) + modpath, attrname = importspec.split(':') + if modpath[0] == '.': + modpath = implprefix + modpath if name == '__doc__': - self.__doc__ = importobj(importspec) + self.__doc__ = importobj(modpath, attrname) else: - if importspec[0] == '.': - importspec = self.__name__ + importspec - self.__map__[name] = importspec + self.__map__[name] = (modpath, attrname) def __repr__(self): return '' % (self.__name__,) def __getattr__(self, name): try: - importspec = self.__map__.pop(name) + modpath, attrname = self.__map__[name] except KeyError: raise AttributeError(name) else: - result = importobj(importspec) + result = importobj(modpath, attrname) setattr(self, name, result) + del self.__map__[name] return result def __dict__(self): From commits-noreply at bitbucket.org Thu Oct 29 12:28:32 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 29 Oct 2009 11:28:32 +0000 (UTC) Subject: [py-svn] py-trunk commit 88cd8639f646: remove pyrest and _py/rest before first 1.1. release Message-ID: <20091029112832.484AE7EFE0@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256815547 -3600 # Node ID 88cd8639f646211cf2e0b60020fae3d684718211 # Parent e92da679b6f90839a45a02c3c9fae8f8af9de16c remove pyrest and _py/rest before first 1.1. 
release --- a/py/bin/py.rest +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env python -from _findpy import py -py.cmdline.pyrest() --- a/_py/rest/convert.py +++ /dev/null @@ -1,163 +0,0 @@ -import py - -from _py.process.cmdexec import ExecutionFailed -# utility functions to convert between various formats - -format_to_dotargument = {"png": "png", - "eps": "ps", - "ps": "ps", - "pdf": "ps", - } - -def ps2eps(ps): - # XXX write a pure python version - if not py.path.local.sysfind("ps2epsi") and \ - not py.path.local.sysfind("ps2eps"): - raise SystemExit("neither ps2eps nor ps2epsi found") - try: - eps = ps.new(ext=".eps") - py.process.cmdexec('ps2epsi "%s" "%s"' % (ps, eps)) - except ExecutionFailed: - py.process.cmdexec('ps2eps -l -f "%s"' % ps) - -def ps2pdf(ps, compat_level="1.2"): - if not py.path.local.sysfind("gs"): - raise SystemExit("ERROR: gs not found") - pdf = ps.new(ext=".pdf") - options = dict(OPTIONS="-dSAFER -dCompatibilityLevel=%s" % compat_level, - infile=ps, outfile=pdf) - cmd = ('gs %(OPTIONS)s -q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite ' - '"-sOutputFile=%(outfile)s" %(OPTIONS)s -c .setpdfwrite ' - '-f "%(infile)s"') % options - py.process.cmdexec(cmd) - return pdf - -def eps2pdf(eps): - # XXX write a pure python version - if not py.path.local.sysfind("epstopdf"): - raise SystemExit("ERROR: epstopdf not found") - py.process.cmdexec('epstopdf "%s"' % eps) - -def dvi2eps(dvi, dest=None): - if dest is None: - dest = eps.new(ext=".eps") - command = 'dvips -q -E -n 1 -D 600 -p 1 -o "%s" "%s"' % (dest, dvi) - if not py.path.local.sysfind("dvips"): - raise SystemExit("ERROR: dvips not found") - py.process.cmdexec(command) - -def convert_dot(fn, new_extension): - if not py.path.local.sysfind("dot"): - raise SystemExit("ERROR: dot not found") - result = fn.new(ext=new_extension) - print(result) - arg = "-T%s" % (format_to_dotargument[new_extension], ) - py.std.os.system('dot "%s" "%s" > "%s"' % (arg, fn, result)) - if new_extension == "eps": - ps = result.new(ext="ps") - result.move(ps) - ps2eps(ps) - ps.remove() - elif new_extension == "pdf": - # convert to eps file first, to get the bounding box right - eps = result.new(ext="eps") - ps = result.new(ext="ps") - result.move(ps) - ps2eps(ps) - eps2pdf(eps) - ps.remove() - eps.remove() - return result - - -class latexformula2png(object): - def __init__(self, formula, dest, temp=None): - self.formula = formula - try: - import Image - self.Image = Image - self.scale = 2 # create a larger image - self.upscale = 5 # create the image upscale times larger, then scale it down - except ImportError: - self.scale = 2 - self.upscale = 1 - self.Image = None - self.output_format = ('pngmono', 'pnggray', 'pngalpha')[2] - if temp is None: - temp = py.test.ensuretemp("latexformula") - self.temp = temp - self.latex = self.temp.join('formula.tex') - self.dvi = self.temp.join('formula.dvi') - self.eps = self.temp.join('formula.eps') - self.png = self.temp.join('formula.png') - self.saveas(dest) - - def saveas(self, dest): - self.gen_latex() - self.gen_dvi() - dvi2eps(self.dvi, self.eps) - self.gen_png() - self.scale_image() - self.png.copy(dest) - - def gen_latex(self): - self.latex.write (""" - \\documentclass{article} - \\pagestyle{empty} - \\begin{document} - - %s - \\pagebreak - - \\end{document} - """ % (self.formula)) - - def gen_dvi(self): - origdir = py.path.local() - self.temp.chdir() - py.process.cmdexec('latex "%s"' % (self.latex)) - origdir.chdir() - - def gen_png(self): - tempdir = py.path.local.mkdtemp() - - re_bbox = 
py.std.re.compile('%%BoundingBox:\s*(\d+) (\d+) (\d+) (\d+)') - eps = self.eps.read() - x1, y1, x2, y2 = [int(i) for i in re_bbox.search(eps).groups()] - X = x2 - x1 + 2 - Y = y2 - y1 + 2 - mx = -x1 - my = -y1 - ps = self.temp.join('temp.ps') - source = self.eps - ps.write(""" - 1 1 1 setrgbcolor - newpath - -1 -1 moveto - %(X)d -1 lineto - %(X)d %(Y)d lineto - -1 %(Y)d lineto - closepath - fill - %(mx)d %(my)d translate - 0 0 0 setrgbcolor - (%(source)s) run - - """ % locals()) - - sx = int((x2 - x1) * self.scale * self.upscale) - sy = int((y2 - y1) * self.scale * self.upscale) - res = 72 * self.scale * self.upscale - command = ('gs -q -g%dx%d -r%dx%d -sDEVICE=%s -sOutputFile="%s" ' - '-dNOPAUSE -dBATCH "%s"') % ( - sx, sy, res, res, self.output_format, self.png, ps) - py.process.cmdexec(command) - - def scale_image(self): - if self.Image is None: - return - image = self.Image.open(str(self.png)) - image.resize((image.size[0] / self.upscale, - image.size[1] / self.upscale), - self.Image.ANTIALIAS).save(str(self.png)) - --- a/_py/rest/latex.py +++ /dev/null @@ -1,154 +0,0 @@ -import py - -from _py.process.cmdexec import ExecutionFailed - -font_to_package = {"times": "times", "helvetica": "times", - "new century schoolbock": "newcent", "avant garde": "newcent", - "palatino": "palatino", - } -sans_serif_fonts = {"helvetica": True, - "avant garde": True, - } - - -def merge_files(pathlist, pagebreak=False): - if len(pathlist) == 1: - return pathlist[0].read() - sectnum = False - toc = False - result = [] - includes = {} - for path in pathlist: - lines = path.readlines() - for line in lines: - # prevent several table of contents - # and especially sectnum several times - if ".. contents::" in line: - if not toc: - toc = True - result.append(line) - elif ".. sectnum::" in line: - if not sectnum: - sectnum = True - result.append(line) - elif line.strip().startswith(".. include:: "): - #XXX slightly unsafe - inc = line.strip()[13:] - if inc not in includes: - includes[inc] = True - result.append(line) - else: - result.append(line) - if pagebreak: - result.append(".. 
raw:: latex \n\n \\newpage\n\n") - if pagebreak: - result.pop() #remove the last pagebreak again - return "".join(result) - -def create_stylesheet(options, path): - fill_in = {} - if "logo" in options: - fill_in["have_logo"] = "" - fill_in["logo"] = options["logo"] - else: - fill_in["have_logo"] = "%" - fill_in["logo"] = "" - if "font" in options: - font = options["font"].lower() - fill_in["font_package"] = font_to_package[font] - fill_in["specified_font"] = "" - fill_in["sans_serif"] = font not in sans_serif_fonts and "%" or "" - else: - fill_in["specified_font"] = "%" - fill_in["sans_serif"] = "%" - fill_in["font_package"] = "" - if 'toc_depth' in options: - fill_in["have_tocdepth"] = "" - fill_in["toc_depth"] = options["toc_depth"] - else: - fill_in["have_tocdepth"] = "%" - fill_in["toc_depth"] = "" - fill_in["heading"] = options.get("heading", "") - template_file = path.join("rest.sty.template") - if not template_file.check(): - template_file = py.path.local(__file__).dirpath("rest.sty.template") - return template_file.read() % fill_in - -def process_configfile(configfile, debug=False): - old = py.path.local() - py.path.local(configfile).dirpath().chdir() - configfile = py.path.local(configfile) - path = configfile.dirpath() - configfile_dic = {} - py.std.sys.path.insert(0, str(path)) - py.builtin.execfile(str(configfile), configfile_dic) - pagebreak = configfile_dic.get("pagebreak", False) - rest_sources = [py.path.local(p) - for p in configfile_dic['rest_sources']] - rest = configfile.new(ext='txt') - if len(rest_sources) > 1: - assert rest not in rest_sources - content = merge_files(rest_sources, pagebreak) - if len(rest_sources) > 1: - rest.write(content) - sty = configfile.new(ext='sty') - content = create_stylesheet(configfile_dic, path) - sty.write(content) - rest_options = None - if 'rest_options' in configfile_dic: - rest_options = configfile_dic['rest_options'] - process_rest_file(rest, sty.basename, debug, rest_options) - #cleanup: - if not debug: - sty.remove() - if rest not in rest_sources: - rest.remove() - old.chdir() - -def process_rest_file(restfile, stylesheet=None, debug=False, rest_options=None): - from docutils.core import publish_cmdline - if not py.path.local.sysfind("pdflatex"): - raise SystemExit("ERROR: pdflatex not found") - old = py.path.local() - f = py.path.local(restfile) - path = f.dirpath() - path.chdir() - pdf = f.new(ext="pdf") - if pdf.check(): - pdf.remove() - tex = f.new(ext="tex").basename - options = [f, "--input-encoding=latin-1", "--graphicx-option=auto", - "--traceback"] - if stylesheet is not None: - sty = path.join(stylesheet) - if sty.check(): - options.append('--stylesheet=%s' % (sty.relto(f.dirpath()), )) - options.append(f.new(basename=tex)) - options = map(str, options) - if rest_options is not None: - options.extend(rest_options) - publish_cmdline(writer_name='latex', argv=options) - i = 0 - while i < 10: # there should never be as many as five reruns, but to be sure - try: - latexoutput = py.process.cmdexec('pdflatex "%s"' % (tex, )) - except ExecutionFailed: - e = py.std.sys.exc_info()[1] - print("ERROR: pdflatex execution failed") - print("pdflatex stdout:") - print(e.out) - print("pdflatex stderr:") - print(e.err) - raise SystemExit - if debug: - print(latexoutput) - if py.std.re.search("LaTeX Warning:.*Rerun", latexoutput) is None: - break - i += 1 - - old.chdir() - #cleanup: - if not debug: - for ext in "log aux tex out".split(): - p = pdf.new(ext=ext) - p.remove() --- a/_py/rest/directive.py +++ /dev/null @@ -1,115 +0,0 @@ -# 
XXX this file is messy since it tries to deal with several docutils versions -import py - -from _py.rest.convert import convert_dot, latexformula2png - -import sys -import docutils -from docutils import nodes -from docutils.parsers.rst import directives, states, roles -from docutils.parsers.rst.directives import images - -if hasattr(images, "image"): - directives_are_functions = True -else: - directives_are_functions = False - -try: - from docutils.utils import unescape # docutils version > 0.3.5 -except ImportError: - from docutils.parsers.rst.states import unescape # docutils 0.3.5 - -if not directives_are_functions: - ImageClass = images.Image - -else: - class ImageClass(object): - option_spec = images.image.options - def run(self): - return images.image('image', - self.arguments, - self.options, - self.content, - self.lineno, - self.content_offset, - self.block_text, - self.state, - self.state_machine) - - -backend_to_image_format = {"html": "png", "latex": "pdf"} - -class GraphvizDirective(ImageClass): - def convert(self, fn, path): - path = py.path.local(path).dirpath() - dot = path.join(fn) - result = convert_dot(dot, backend_to_image_format[_backend]) - return result.relto(path) - - def run(self): - newname = self.convert(self.arguments[0], - self.state.document.settings._source) - text = self.block_text.replace("graphviz", "image", 1) - self.block_text = text.replace(self.arguments[0], newname, 1) - self.name = 'image' - self.arguments = [newname] - return ImageClass.run(self) - - def old_interface(self): - def f(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - for arg in "name arguments options content lineno " \ - "content_offset block_text state state_machine".split(): - setattr(self, arg, locals()[arg]) - return self.run() - f.arguments = (1, 0, 1) - f.options = self.option_spec - return f - - -_backend = None -def set_backend_and_register_directives(backend): - #XXX this is only used to work around the inflexibility of docutils: - # a directive does not know the target format - global _backend - _backend = backend - if not directives_are_functions: - directives.register_directive("graphviz", GraphvizDirective) - else: - directives.register_directive("graphviz", - GraphvizDirective().old_interface()) - roles.register_canonical_role("latexformula", latexformula_role) - -def latexformula_role(name, rawtext, text, lineno, inliner, - options={}, content=[]): - if _backend == 'latex': - options['format'] = 'latex' - return roles.raw_role(name, rawtext, text, lineno, inliner, - options, content) - else: - # XXX: make the place of the image directory configurable - sourcedir = py.path.local(inliner.document.settings._source).dirpath() - imagedir = sourcedir.join("img") - if not imagedir.check(): - imagedir.mkdir() - # create halfway senseful imagename: - # use hash of formula + alphanumeric characters of it - # could - imagename = "%s_%s.png" % ( - hash(text), "".join([c for c in text if c.isalnum()])) - image = imagedir.join(imagename) - latexformula2png(unescape(text, True), image) - imagenode = nodes.image(image.relto(sourcedir), uri=image.relto(sourcedir)) - return [imagenode], [] -latexformula_role.content = True -latexformula_role.options = {} - -def register_linkrole(role_name, callback): - def source_role(name, rawtext, text, lineno, inliner, options={}, - content=[]): - text, target = callback(name, text) - reference_node = nodes.reference(rawtext, text, name=text, refuri=target) - return [reference_node], [] - 
source_role.content = True - source_role.options = {} - roles.register_canonical_role(role_name, source_role) --- a/testing/rest/data/graphviz.txt +++ /dev/null @@ -1,7 +0,0 @@ -This tests the graphviz directive -================================= - -let's embed the cool graphviz example: - -.. graphviz:: example1.dot - :scale: 50 --- a/testing/pytest/plugin/test_pytest_restdoc.py +++ b/testing/pytest/plugin/test_pytest_restdoc.py @@ -45,7 +45,8 @@ class TestDoctest: def pytest_funcarg__testdir(self, request): testdir = request.getfuncargvalue("testdir") assert request.module.__name__ == __name__ - testdir.makepyfile(confrest="from _py.rest.resthtml import Project") + testdir.makepyfile(confrest= + "from _py.test.plugin.pytest_restdoc import Project") for p in testdir.plugins: if p == globals(): break --- a/_py/rest/resthtml.py +++ /dev/null @@ -1,87 +0,0 @@ -import py -import sys, os, traceback -import re - -if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()): - def log(msg): - print(msg) -else: - def log(msg): - pass - -def convert_rest_html(source, source_path, stylesheet=None, encoding='latin1'): - from _py.rest import directive - """ return html latin1-encoded document for the given input. - source a ReST-string - sourcepath where to look for includes (basically) - stylesheet path (to be used if any) - """ - from docutils.core import publish_string - directive.set_backend_and_register_directives("html") - kwargs = { - 'stylesheet' : stylesheet, - 'stylesheet_path': None, - 'traceback' : 1, - 'embed_stylesheet': 0, - 'output_encoding' : encoding, - #'halt' : 0, # 'info', - 'halt_level' : 2, - } - # docutils uses os.getcwd() :-( - source_path = os.path.abspath(str(source_path)) - prevdir = os.getcwd() - try: - #os.chdir(os.path.dirname(source_path)) - return publish_string(source, source_path, writer_name='html', - settings_overrides=kwargs) - finally: - os.chdir(prevdir) - -def process(txtpath, encoding='latin1'): - """ process a textfile """ - log("processing %s" % txtpath) - assert txtpath.check(ext='.txt') - if isinstance(txtpath, py.path.svnwc): - txtpath = txtpath.localpath - htmlpath = txtpath.new(ext='.html') - #svninfopath = txtpath.localpath.new(ext='.svninfo') - - style = txtpath.dirpath('style.css') - if style.check(): - stylesheet = style.basename - else: - stylesheet = None - content = unicode(txtpath.read(), encoding) - doc = convert_rest_html(content, txtpath, stylesheet=stylesheet, encoding=encoding) - htmlpath.open('wb').write(doc) - #log("wrote %r" % htmlpath) - #if txtpath.check(svnwc=1, versioned=1): - # info = txtpath.info() - # svninfopath.dump(info) - -if sys.version_info > (3, 0): - def _uni(s): return s -else: - def _uni(s): - return unicode(s) - -rex1 = re.compile(r'.*(.*).*', re.MULTILINE | re.DOTALL) -rex2 = re.compile(r'.*
<div class="document">(.*)</div>
.*', re.MULTILINE | re.DOTALL) - -def strip_html_header(string, encoding='utf8'): - """ return the content of the body-tag """ - uni = unicode(string, encoding) - for rex in rex1,rex2: - match = rex.search(uni) - if not match: - break - uni = match.group(1) - return uni - -class Project: # used for confrest.py files - def __init__(self, sourcepath): - self.sourcepath = sourcepath - def process(self, path): - return process(path) - def get_htmloutputpath(self, path): - return path.new(ext='html') --- a/testing/rest/test_htmlrest.py +++ /dev/null @@ -1,20 +0,0 @@ - -import py -from _py.rest import resthtml -from testing.rest.setup import getdata - -def setup_module(mod): - py.test.importorskip("docutils") - if not py.path.local.sysfind("gs") or \ - not py.path.local.sysfind("dot") or \ - not py.path.local.sysfind("latex"): - py.test.skip("ghostscript, graphviz and latex needed") - mod.datadir = getdata() - -def test_process_simple(): - # fallback test: only checks that no exception is raised - def rec(p): - return p.check(dotfile=0) - for x in datadir.visit("*.txt", rec=rec,): - yield resthtml.process, x - --- a/testing/rest/data/formula1.txt +++ /dev/null @@ -1,2 +0,0 @@ -this formula contains a fraction, that means it also contains a backslash: -:latexformula:`$\frac{x^2}{y^2}$` --- a/testing/rest/test_rst2pdf.py +++ /dev/null @@ -1,50 +0,0 @@ -import py -from _py.rest.latex import process_configfile, process_rest_file -from testing.rest.setup import getdata - -docutils = py.test.importorskip("docutils") - -def setup_module(mod): - if not py.path.local.sysfind("gs") or \ - not py.path.local.sysfind("dot") or \ - not py.path.local.sysfind("latex"): - py.test.skip("ghostscript, graphviz and latex needed") - mod.datadir = getdata() - -class TestRst2Pdf(object): - def _process_rest_file(self): - part2 = datadir.join("part1.txt") - pdf = part2.new(ext="pdf") - process_rest_file(part2) - assert pdf.check() - pdf.remove() - - def _process_configfile(self): - config = datadir.join("example.rst2pdfconfig") - pdf = config.new(ext="pdf") - tex = datadir.join('example.tex') - process_configfile(config, debug=True) - assert pdf.check() - assert tex.check() - texcontent = tex.read() - assert "Generated by" in texcontent - assert "Docutils" in texcontent - process_configfile(config, debug=False) - assert pdf.check() - assert not tex.check() - pdf.remove() - - def _process_all(self): - # fallback test: only checks that no exception is raised - def rec(p): - return p.check(dotfile=0) - - for x in datadir.visit("*.rst2pdfconfig", rec=rec): - process_configfile(x) - for x in datadir.visit("*.txt", rec=rec): - process_rest_file(x) - - def test_rst2pdf(self): - self._process_rest_file() - self._process_configfile() - self._process_all() --- a/testing/rest/data/example.rst2pdfconfig +++ /dev/null @@ -1,3 +0,0 @@ -rest_sources = ['part1.txt', 'part2.txt'] - -rest_options = ["--use-latex-toc", "--generator"] # generator is easy to test --- a/py/bin/win32/py.rest.cmd +++ /dev/null @@ -1,2 +0,0 @@ - at echo off -python "%~dp0\..\py.rest" %* --- a/setup.py +++ b/setup.py @@ -31,14 +31,14 @@ def main(): platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], author='holger krekel, Guido Wesdorp, Carl Friedrich Bolz, Armin Rigo, Maciej Fijalkowski & others', author_email='holger at merlinux.eu', - entry_points={'console_scripts': ['py.cleanup = py.cmdline:pycleanup', - 'py.convert_unittest = py.cmdline:pyconvert_unittest', - 'py.countloc = py.cmdline:pycountloc', - 'py.lookup = py.cmdline:pylookup', - 'py.rest = 
py.cmdline:pyrest', - 'py.svnwcrevert = py.cmdline:pysvnwcrevert', - 'py.test = py.cmdline:pytest', - 'py.which = py.cmdline:pywhich']}, + entry_points={'console_scripts': [ + 'py.cleanup = py.cmdline:pycleanup', + 'py.convert_unittest = py.cmdline:pyconvert_unittest', + 'py.countloc = py.cmdline:pycountloc', + 'py.lookup = py.cmdline:pylookup', + 'py.svnwcrevert = py.cmdline:pysvnwcrevert', + 'py.test = py.cmdline:pytest', + 'py.which = py.cmdline:pywhich']}, classifiers=['Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', @@ -52,7 +52,6 @@ def main(): 'Programming Language :: Python'], packages=['py', '_py', - '_py.builtin', '_py.cmdline', '_py.code', '_py.compat', @@ -61,7 +60,6 @@ def main(): '_py.path', '_py.path.gateway', '_py.process', - '_py.rest', '_py.test', '_py.test.dist', '_py.test.looponfail', @@ -73,7 +71,6 @@ def main(): 'bin/py.convert_unittest', 'bin/py.countloc', 'bin/py.lookup', - 'bin/py.rest', 'bin/py.svnwcrevert', 'bin/py.test', 'bin/py.which', @@ -81,11 +78,9 @@ def main(): 'bin/win32/py.convert_unittest.cmd', 'bin/win32/py.countloc.cmd', 'bin/win32/py.lookup.cmd', - 'bin/win32/py.rest.cmd', 'bin/win32/py.svnwcrevert.cmd', 'bin/win32/py.test.cmd', 'bin/win32/py.which.cmd',], - '_py': ['rest/rest.sty.template']}, zip_safe=True, ) --- a/testing/rest/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -import py - -rootdir = py.path.local(__file__).dirpath() -mydatadir = py.path.local(__file__).dirpath('data') - -def getdata(): - rel = mydatadir.relto(rootdir) - tmpdir = py.test.ensuretemp(rel.replace(rootdir.sep, '_')) - mydatadir.copy(tmpdir) - return tmpdir - --- a/testing/rest/test_directive.py +++ /dev/null @@ -1,63 +0,0 @@ -import py - -docutils = py.test.importorskip("docutils") -from _py.rest import directive, resthtml -from _py.rest.latex import process_rest_file -from testing.rest.setup import getdata - -def setup_module(mod): - mod.datadir = getdata() - mod.testdir = py.test.ensuretemp("rest") - -class TestGraphviz(object): - def _graphviz_html(self): - if not py.path.local.sysfind("dot"): - py.test.skip("graphviz needed") - directive.set_backend_and_register_directives("html") - if not py.path.local.sysfind("svn"): - py.test.skip("svn needed") - txt = datadir.join("graphviz.txt") - html = txt.new(ext="html") - png = datadir.join("example1.png") - resthtml.process(txt) - assert html.check() - assert png.check() - html_content = html.read() - assert png.basename in html_content - html.remove() - png.remove() - - def _graphviz_pdf(self): - for exe in 'dot latex epstopdf ps2eps'.split(): - if not py.path.local.sysfind(exe): - py.test.skip("%r needed" %(exe,)) - - directive.set_backend_and_register_directives("latex") - txt = py.path.local(datadir.join("graphviz.txt")) - pdf = txt.new(ext="pdf") - dotpdf = datadir.join("example1.pdf") - process_rest_file(txt) - assert pdf.check() - assert dotpdf.check() - pdf.remove() - dotpdf.remove() - - def test_graphviz(self): - self._graphviz_html() - self._graphviz_pdf() - -def test_own_links(): - def callback(name, text): - assert name == "foo" - return "bar xyz", "http://codespeak.net/noclue" - directive.register_linkrole("foo", callback) - txt = testdir.join("link-role.txt") - txt.write(""" -:foo:`whatever` -""") - html = txt.new(ext="html") - resthtml.process(txt) - assert html.check() - htmlcontent = html.read() - assert "http://codespeak.net/noclue" in htmlcontent - assert "bar xyz" in htmlcontent --- a/testing/rest/data/part1.txt +++ /dev/null @@ -1,19 +0,0 @@ -.. 
contents:: - -This is the first part of the example rest file -=============================================== - - -some content. - -fancy subsection heading -------------------------- - -some more content. - -really stupid document ------------------------- - -we are all thankful that it ends now. - - --- a/testing/rest/test_convert.py +++ /dev/null @@ -1,30 +0,0 @@ -import py -from _py.rest.convert import convert_dot, latexformula2png -from testing.rest.setup import getdata - -def setup_module(mod): - required = 'gs', 'dot', 'latex', 'epstopdf', - for exe in required: - if not py.path.local.sysfind(exe): - py.test.skip("%r not found, required: %r" %(exe, required)) - mod.datadir = getdata() - -def test_convert_dot(): - # XXX not really clear that the result is valid pdf/eps - dot = datadir.join("example1.dot") - convert_dot(dot, "pdf") - pdf = dot.new(ext="pdf") - assert pdf.check() - pdf.remove() - convert_dot(dot, "eps") - eps = dot.new(ext="eps") - assert eps.check() - eps.remove() - -def test_latexformula(): - png = datadir.join("test.png") - formula = r'$$Entropy(T) = - \sum^{m}_{j=1} \frac{|T_j|}{|T|} \log \frac{|T_j|}{|T|}$$' - #does not crash - latexformula2png(formula, png) - assert png.check() - png.remove() --- a/_py/cmdline/pyrest.py +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env python -""" -invoke - - py.rest filename1.txt directory - -to generate html files from ReST. - -It is also possible to generate pdf files using the --topdf option. - -http://docutils.sourceforge.net/docs/user/rst/quickref.html - -""" - -import os, sys -import py - -if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()): - def log(msg): - print(msg) -else: - def log(msg): - pass - - -parser = py.std.optparse.OptionParser(usage=__doc__) -parser.add_option("--topdf", action="store_true", dest="topdf", default=False, - help="generate pdf files") -parser.add_option("--stylesheet", dest="stylesheet", default=None, - help="use specified latex style sheet") -parser.add_option("--debug", action="store_true", dest="debug", - default=False, - help="print debug output and don't delete files") - - -def main(): - try: - from _py.rest import directive, resthtml - from _py.rest.latex import process_rest_file, process_configfile - except ImportError: - e = sys.exc_info()[1] - print(str(e)) - sys.exit(1) - - (options, args) = parser.parse_args() - - if len(args) == 0: - filenames = [py.path.svnwc()] - else: - filenames = [py.path.svnwc(x) for x in args] - - if options.topdf: - directive.set_backend_and_register_directives("latex") - - for p in filenames: - if not p.check(): - log("path %s not found, ignoring" % p) - continue - def fil(p): - return p.check(fnmatch='*.txt', versioned=True) - def rec(p): - return p.check(dotfile=0) - if p.check(dir=1): - for x in p.visit(fil, rec): - resthtml.process(x) - elif p.check(file=1): - if p.ext == ".rst2pdfconfig": - directive.set_backend_and_register_directives("latex") - process_configfile(p, options.debug) - else: - if options.topdf: - cfg = p.new(ext=".rst2pdfconfig") - if cfg.check(): - print("using config file %s" % (cfg, )) - process_configfile(cfg, options.debug) - else: - process_rest_file(p.localpath, - options.stylesheet, - options.debug) - else: - resthtml.process(p) - --- a/doc/changelog.txt +++ b/doc/changelog.txt @@ -1,6 +1,11 @@ Changes between 1.0.2 and '1.1.0b1' ===================================== +* remove py.rest tool and internal namespace - it was + never really advertised and can still be used with + the old release if needed. 
If there is interest + it could be revived into its own tool i guess. + * fix issue48 and issue59: raise an Error if the module from an imported test file does not seem to come from the filepath - avoids "same-name" confusion that has --- a/_py/rest/rest.sty.template +++ /dev/null @@ -1,26 +0,0 @@ -\usepackage{fancyhdr} -\usepackage{lastpage} -\pagestyle{fancy} -\usepackage[pdftex]{graphicx} - -%(have_tocdepth)s\setcounter{tocdepth}{%(toc_depth)s} - -%(sans_serif)s\renewcommand{\familydefault}{\sfdefault} -%(specified_font)s\usepackage{%(font_package)s} -\lhead{ -\begin{tabular}{l} -\textbf{\Large %(heading)s}\tabularnewline -\thepage\ of \pageref{LastPage}, \today -\tabularnewline -\tabularnewline -\end{tabular} -} -\rhead{ -%(have_logo)s\includegraphics[height=4\baselineskip]{%(logo)s} -} -\cfoot{} -\addtolength{\headheight}{3\baselineskip} -\addtolength{\headheight}{0.61pt} -\setlength\parskip{\medskipamount} -\setlength\parindent{0pt} - --- a/doc/confrest.py +++ b/doc/confrest.py @@ -1,5 +1,6 @@ import py -from _py.rest.resthtml import convert_rest_html, strip_html_header + +from _py.test.plugin.pytest_restdoc import convert_rest_html, strip_html_header html = py.xml.html @@ -284,5 +285,3 @@ def relpath(p1, p2, sep=os.path.sep, bac if tolist_diff: return sep.join([back,]*(backcount-1) + tolist_diff) return sep.join([back,]*(backcount) + tolist[commonindex:]) - - --- a/testing/rest/data/formula.txt +++ /dev/null @@ -1,8 +0,0 @@ -Euklids proof about the infinitude of primes -============================================ - -If there were only a finite amount of primes then there would be a largest -prime :latexformula:`p`. However, the number :latexformula:`p! + 1` is not -divisible by any number :latexformula:`1, ..., p`. Therefore, a prime dividing -:latexformula:`p! + 1` has to be bigger than :latexformula:`p`. Therefore there -is an infinite number of primes. --- a/_py/test/plugin/pytest_restdoc.py +++ b/_py/test/plugin/pytest_restdoc.py @@ -2,7 +2,7 @@ perform ReST syntax, local and remote reference tests on .rst/.txt files. """ import py -import sys +import sys, os, re def pytest_addoption(parser): group = parser.getgroup("ReST", "ReST documentation check options") @@ -87,12 +87,11 @@ class ReSTSyntaxTest(py.test.collect.Ite py.test.fail("docutils processing failed, see captured stderr") def register_linkrole(self): - from _py.rest import directive - directive.register_linkrole('api', self.resolve_linkrole) - directive.register_linkrole('source', self.resolve_linkrole) - - # XXX fake sphinx' "toctree" and refs - directive.register_linkrole('ref', self.resolve_linkrole) + #directive.register_linkrole('api', self.resolve_linkrole) + #directive.register_linkrole('source', self.resolve_linkrole) +# +# # XXX fake sphinx' "toctree" and refs +# directive.register_linkrole('ref', self.resolve_linkrole) from docutils.parsers.rst import directives def toctree_directive(name, arguments, options, content, lineno, @@ -349,3 +348,85 @@ def localrefcheck(tryfn, path, lineno): else: py.test.fail("anchor reference error %s#%s in %s:%d" %( tryfn, anchor, path.basename, lineno+1)) + +if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()): + def log(msg): + print(msg) +else: + def log(msg): + pass + +def convert_rest_html(source, source_path, stylesheet=None, encoding='latin1'): + """ return html latin1-encoded document for the given input. 
+ source a ReST-string + sourcepath where to look for includes (basically) + stylesheet path (to be used if any) + """ + from docutils.core import publish_string + kwargs = { + 'stylesheet' : stylesheet, + 'stylesheet_path': None, + 'traceback' : 1, + 'embed_stylesheet': 0, + 'output_encoding' : encoding, + #'halt' : 0, # 'info', + 'halt_level' : 2, + } + # docutils uses os.getcwd() :-( + source_path = os.path.abspath(str(source_path)) + prevdir = os.getcwd() + try: + #os.chdir(os.path.dirname(source_path)) + return publish_string(source, source_path, writer_name='html', + settings_overrides=kwargs) + finally: + os.chdir(prevdir) + +def process(txtpath, encoding='latin1'): + """ process a textfile """ + log("processing %s" % txtpath) + assert txtpath.check(ext='.txt') + if isinstance(txtpath, py.path.svnwc): + txtpath = txtpath.localpath + htmlpath = txtpath.new(ext='.html') + #svninfopath = txtpath.localpath.new(ext='.svninfo') + + style = txtpath.dirpath('style.css') + if style.check(): + stylesheet = style.basename + else: + stylesheet = None + content = unicode(txtpath.read(), encoding) + doc = convert_rest_html(content, txtpath, stylesheet=stylesheet, encoding=encoding) + htmlpath.open('wb').write(doc) + #log("wrote %r" % htmlpath) + #if txtpath.check(svnwc=1, versioned=1): + # info = txtpath.info() + # svninfopath.dump(info) + +if sys.version_info > (3, 0): + def _uni(s): return s +else: + def _uni(s): + return unicode(s) + +rex1 = re.compile(r'.*(.*).*', re.MULTILINE | re.DOTALL) +rex2 = re.compile(r'.*
<div class="document">(.*)</div>
.*', re.MULTILINE | re.DOTALL) + +def strip_html_header(string, encoding='utf8'): + """ return the content of the body-tag """ + uni = unicode(string, encoding) + for rex in rex1,rex2: + match = rex.search(uni) + if not match: + break + uni = match.group(1) + return uni + +class Project: # used for confrest.py files + def __init__(self, sourcepath): + self.sourcepath = sourcepath + def process(self, path): + return process(path) + def get_htmloutputpath(self, path): + return path.new(ext='html') --- a/py/__init__.py +++ b/py/__init__.py @@ -37,11 +37,9 @@ _py.apipkg.initpkg(__name__, dict( }, cmdline = { 'pytest': '_py.cmdline.pytest:main', - 'pyrest': '_py.cmdline.pyrest:main', 'pylookup': '_py.cmdline.pylookup:main', 'pycountloc': '_py.cmdline.pycountlog:main', 'pytest': '_py.test.cmdline:main', - 'pyrest': '_py.cmdline.pyrest:main', 'pylookup': '_py.cmdline.pylookup:main', 'pycountloc': '_py.cmdline.pycountloc:main', 'pycleanup': '_py.cmdline.pycleanup:main', --- a/testing/rest/data/example1.dot +++ /dev/null @@ -1,3 +0,0 @@ -digraph G { - a -> b -> c -> d; -} --- a/testing/rest/data/tocdepth.rst2pdfconfig +++ /dev/null @@ -1,5 +0,0 @@ -rest_sources = ['part1.txt', 'part2.txt'] - -rest_options = ["--use-latex-toc", "--generator"] # generator is easy to test - -toc_depth = 1 --- a/testing/rest/data/part2.txt +++ /dev/null @@ -1,7 +0,0 @@ -This is the second part of the test file -========================================= - -.. contents:: - -the text in it is not much more interesting. - From commits-noreply at bitbucket.org Thu Oct 29 18:09:43 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 29 Oct 2009 17:09:43 +0000 (UTC) Subject: [py-svn] py-trunk commit 70276ab3a29d: rewrite nose-optional-call check, fixes python2.4 compat Message-ID: <20091029170943.C07017EFDB@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256836085 -3600 # Node ID 70276ab3a29dbaf96a3609e9ab99ab8dccf232d1 # Parent cb903127b5c073e41ea3f82c589a097a873ac800 rewrite nose-optional-call check, fixes python2.4 compat --- a/_py/test/plugin/pytest_nose.py +++ b/_py/test/plugin/pytest_nose.py @@ -91,9 +91,8 @@ def pytest_make_collect_report(collector def call_optional(obj, name): method = getattr(obj, name, None) if method: - argspec = inspect.getargspec(method) - if argspec[0] == ['self']: - argspec = argspec[1:] - if not any(argspec): + ismethod = inspect.ismethod(method) + rawcode = py.code.getrawcode(method) + if not rawcode.co_varnames[ismethod:]: method() return True From commits-noreply at bitbucket.org Thu Oct 29 18:09:44 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 29 Oct 2009 17:09:44 +0000 (UTC) Subject: [py-svn] py-trunk commit f13c96e6ec1f: moving py/bin to rootlevel bin/ and fixing tests Message-ID: <20091029170944.2B6D37EFDF@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256831582 -3600 # Node ID f13c96e6ec1fa4313d7b10bbddb24ccc74d67b05 # Parent 88cd8639f646211cf2e0b60020fae3d684718211 moving py/bin to rootlevel bin/ and fixing tests --- /dev/null +++ b/bin/env.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +import sys, os, os.path + +progpath = sys.argv[0] +packagedir = os.path.dirname(os.path.dirname(os.path.abspath(progpath))) +packagename = os.path.basename(packagedir) +bindir = os.path.join(packagedir, 'bin') +if 
sys.platform == 'win32': + bindir = os.path.join(bindir, 'win32') +rootdir = os.path.dirname(packagedir) + +def prepend_path(name, value): + sep = os.path.pathsep + curpath = os.environ.get(name, '') + newpath = [value] + [ x for x in curpath.split(sep) if x and x != value ] + return setenv(name, sep.join(newpath)) + +def setenv(name, value): + shell = os.environ.get('SHELL', '') + comspec = os.environ.get('COMSPEC', '') + if shell.endswith('csh'): + cmd = 'setenv %s "%s"' % (name, value) + elif shell.endswith('sh'): + cmd = '%s="%s"; export %s' % (name, value, name) + elif comspec.endswith('cmd.exe'): + cmd = 'set %s=%s' % (name, value) + else: + assert False, 'Shell not supported.' + return cmd + +print(prepend_path('PATH', bindir)) +print(prepend_path('PYTHONPATH', rootdir)) --- a/py/bin/py.lookup +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env python -from _findpy import py -py.cmdline.pylookup() --- /dev/null +++ b/bin/win32/py.test.cmd @@ -0,0 +1,2 @@ + at echo off +python "%~dp0\..\py.test" %* --- a/py/bin/win32/py.svnwcrevert.cmd +++ /dev/null @@ -1,2 +0,0 @@ - at echo off -python "%~dp0\..\py.svnwcrevert" %* --- /dev/null +++ b/bin/py.which @@ -0,0 +1,3 @@ +#!/usr/bin/env python +from _findpy import py +py.cmdline.pywhich() --- /dev/null +++ b/bin/win32/py.lookup.cmd @@ -0,0 +1,2 @@ + at echo off +python "%~dp0\..\py.lookup" %* --- /dev/null +++ b/bin/py.cleanup @@ -0,0 +1,3 @@ +#!/usr/bin/env python +from _findpy import py +py.cmdline.pycleanup() --- /dev/null +++ b/bin/py.convert_unittest @@ -0,0 +1,3 @@ +#!/usr/bin/env python +from _findpy import py +py.cmdline.pyconvert_unittest() --- /dev/null +++ b/bin/win32/py.svnwcrevert.cmd @@ -0,0 +1,2 @@ + at echo off +python "%~dp0\..\py.svnwcrevert" %* --- /dev/null +++ b/bin/win32/py.countloc.cmd @@ -0,0 +1,2 @@ + at echo off +python "%~dp0\..\py.countloc" %* --- a/py/bin/win32/py.lookup.cmd +++ /dev/null @@ -1,2 +0,0 @@ - at echo off -python "%~dp0\..\py.lookup" %* --- /dev/null +++ b/bin/py.lookup @@ -0,0 +1,3 @@ +#!/usr/bin/env python +from _findpy import py +py.cmdline.pylookup() --- a/testing/cmdline/test_generic.py +++ b/testing/cmdline/test_generic.py @@ -1,10 +1,12 @@ import py import sys -binpath = py.path.local(py.__file__).dirpath("bin") -binwinpath = binpath.join("win32") def setup_module(mod): + mod.binpath = py._impldir.dirpath('bin') + if not mod.binpath.check(): + py.test.skip("bin-source scripts not installed") + mod.binwinpath = binpath.join("win32") mod.tmpdir = py.test.ensuretemp(__name__) mod.iswin32 = sys.platform == "win32" --- a/py/bin/py.convert_unittest +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env python -from _findpy import py -py.cmdline.pyconvert_unittest() --- a/py/bin/win32/py.convert_unittest.cmd +++ /dev/null @@ -1,2 +0,0 @@ - at echo off -python "%~dp0\..\py.convert_unittest" %* --- a/py/bin/win32/py.test.cmd +++ /dev/null @@ -1,2 +0,0 @@ - at echo off -python "%~dp0\..\py.test" %* --- a/py/bin/env.py +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env python - -import sys, os, os.path - -progpath = sys.argv[0] -packagedir = os.path.dirname(os.path.dirname(os.path.abspath(progpath))) -packagename = os.path.basename(packagedir) -bindir = os.path.join(packagedir, 'bin') -if sys.platform == 'win32': - bindir = os.path.join(bindir, 'win32') -rootdir = os.path.dirname(packagedir) - -def prepend_path(name, value): - sep = os.path.pathsep - curpath = os.environ.get(name, '') - newpath = [value] + [ x for x in curpath.split(sep) if x and x != value ] - return setenv(name, sep.join(newpath)) - -def setenv(name, 
value): - shell = os.environ.get('SHELL', '') - comspec = os.environ.get('COMSPEC', '') - if shell.endswith('csh'): - cmd = 'setenv %s "%s"' % (name, value) - elif shell.endswith('sh'): - cmd = '%s="%s"; export %s' % (name, value, name) - elif comspec.endswith('cmd.exe'): - cmd = 'set %s=%s' % (name, value) - else: - assert False, 'Shell not supported.' - return cmd - -print(prepend_path('PATH', bindir)) -print(prepend_path('PYTHONPATH', rootdir)) --- /dev/null +++ b/bin/_findpy.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python + +# +# find and import a version of 'py' +# +import sys +import os +from os.path import dirname as opd, exists, join, basename, abspath + +def searchpy(current): + while 1: + last = current + initpy = join(current, '__init__.py') + if not exists(initpy): + pydir = join(current, 'py') + # recognize py-package and ensure it is importable + if exists(pydir) and exists(join(pydir, '__init__.py')): + #for p in sys.path: + # if p == current: + # return True + if current != sys.path[0]: # if we are already first, then ok + sys.stderr.write("inserting into sys.path: %s\n" % current) + sys.path.insert(0, current) + return True + current = opd(current) + if last == current: + return False + +if not searchpy(abspath(os.curdir)): + if not searchpy(opd(abspath(sys.argv[0]))): + if not searchpy(opd(__file__)): + pass # let's hope it is just on sys.path + +import py + +if __name__ == '__main__': + print ("py lib is at %s" % py.__file__) --- a/py/bin/py.which +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env python -from _findpy import py -py.cmdline.pywhich() --- /dev/null +++ b/bin/win32/py.cleanup.cmd @@ -0,0 +1,2 @@ + at echo off +python "%~dp0\..\py.cleanup" %* --- a/py/bin/py.cleanup +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env python -from _findpy import py -py.cmdline.pycleanup() --- a/_py/test/plugin/pytest_pytester.py +++ b/_py/test/plugin/pytest_pytester.py @@ -301,7 +301,7 @@ class TmpTestdir: return self.run(*fullargs) def _getpybinargs(self, scriptname): - bindir = py.path.local(py.__file__).dirpath("bin") + bindir = py._impldir.dirpath('bin') script = bindir.join(scriptname) assert script.check() return py.std.sys.executable, script --- /dev/null +++ b/bin/py.svnwcrevert @@ -0,0 +1,3 @@ +#!/usr/bin/env python +from _findpy import py +py.cmdline.pysvnwcrevert() --- a/py/bin/_findpy.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python - -# -# find and import a version of 'py' -# -import sys -import os -from os.path import dirname as opd, exists, join, basename, abspath - -def searchpy(current): - while 1: - last = current - initpy = join(current, '__init__.py') - if not exists(initpy): - pydir = join(current, 'py') - # recognize py-package and ensure it is importable - if exists(pydir) and exists(join(pydir, '__init__.py')): - #for p in sys.path: - # if p == current: - # return True - if current != sys.path[0]: # if we are already first, then ok - sys.stderr.write("inserting into sys.path: %s\n" % current) - sys.path.insert(0, current) - return True - current = opd(current) - if last == current: - return False - -if not searchpy(abspath(os.curdir)): - if not searchpy(opd(abspath(sys.argv[0]))): - if not searchpy(opd(__file__)): - pass # let's hope it is just on sys.path - -import py - -if __name__ == '__main__': - print ("py lib is at %s" % py.__file__) --- /dev/null +++ b/bin/win32/py.convert_unittest.cmd @@ -0,0 +1,2 @@ + at echo off +python "%~dp0\..\py.convert_unittest" %* --- a/py/bin/win32/py.countloc.cmd +++ /dev/null @@ -1,2 +0,0 @@ - at echo off -python 
"%~dp0\..\py.countloc" %* --- a/py/bin/env.cmd +++ /dev/null @@ -1,2 +0,0 @@ - at echo off -for /F "usebackq delims=" %%i in (`python "%~dp0\env.py"`) do %%i --- /dev/null +++ b/bin/py.test @@ -0,0 +1,3 @@ +#!/usr/bin/env python +from _findpy import py +py.cmdline.pytest() --- a/py/bin/win32/py.cleanup.cmd +++ /dev/null @@ -1,2 +0,0 @@ - at echo off -python "%~dp0\..\py.cleanup" %* --- a/py/bin/py.test +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env python -from _findpy import py -py.cmdline.pytest() --- /dev/null +++ b/bin/env.cmd @@ -0,0 +1,2 @@ + at echo off +for /F "usebackq delims=" %%i in (`python "%~dp0\env.py"`) do %%i --- a/py/bin/win32/py.which.cmd +++ /dev/null @@ -1,2 +0,0 @@ - at echo off -python "%~dp0\..\py.which" %* --- a/py/bin/py.countloc +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env python -from _findpy import py -py.cmdline.pycountloc() --- /dev/null +++ b/bin/win32/py.which.cmd @@ -0,0 +1,2 @@ + at echo off +python "%~dp0\..\py.which" %* --- /dev/null +++ b/bin/py.countloc @@ -0,0 +1,3 @@ +#!/usr/bin/env python +from _findpy import py +py.cmdline.pycountloc() --- a/py/bin/py.svnwcrevert +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env python -from _findpy import py -py.cmdline.pysvnwcrevert() From commits-noreply at bitbucket.org Thu Oct 29 18:09:46 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 29 Oct 2009 17:09:46 +0000 (UTC) Subject: [py-svn] py-trunk commit f0bb4c025c85: some release preps and cleanups Message-ID: <20091029170946.113507EFE2@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256816712 -3600 # Node ID f0bb4c025c85966f7acec860239eb7df19fb38b9 # Parent f13c96e6ec1fa4313d7b10bbddb24ccc74d67b05 some release preps and cleanups - update setup.py for release - use distributes_setup on python3 - remove unncessary package_data - remove execnet example --- a/example/execnet/redirect_remote_output.py +++ /dev/null @@ -1,31 +0,0 @@ -""" -redirect output from remote to a local function -showcasing features of the channel object: - -- sending a channel over a channel -- adapting a channel to a file object -- setting a callback for receiving channel data - -""" - -import py - -gw = execnet.PopenGateway() - -outchan = gw.remote_exec(""" - import sys - outchan = channel.gateway.newchannel() - sys.stdout = outchan.makefile("w") - channel.send(outchan) -""").receive() - -# note: callbacks execute in receiver thread! 
-def write(data): - print "received:", repr(data) -outchan.setcallback(write) - -gw.remote_exec(""" - print 'hello world' - print 'remote execution ends' -""").waitclose() - --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,16 +1,14 @@ include CHANGELOG include README.txt include setup.py +include distribute_setup.py include LICENSE -include py/LICENSE -include py/path/testing/repotest.dump -include py/rest/rest.sty.template graft doc graft contrib graft example -graft py/bin -graft py/rest/testing/data +graft bin exclude *.orig exclude *.rej +exclude .hginore prune .svn prune .hg --- /dev/null +++ b/distribute_setup.py @@ -0,0 +1,456 @@ +#!python +"""Bootstrap distribute installation + +If you want to use setuptools in your package's setup.py, just include this +file in the same directory with it, and add this to the top of your setup.py:: + + from distribute_setup import use_setuptools + use_setuptools() + +If you want to require a specific version of setuptools, set a download +mirror, or use an alternate download directory, you can do so by supplying +the appropriate options to ``use_setuptools()``. + +This file can also be run as a script to install or upgrade setuptools. +""" +import os +import sys +import time +import fnmatch +import tempfile +import tarfile +from distutils import log + +try: + from site import USER_SITE +except ImportError: + USER_SITE = None + +try: + import subprocess + + def _python_cmd(*args): + args = (sys.executable,) + args + return subprocess.call(args) == 0 + +except ImportError: + # will be used for python 2.3 + def _python_cmd(*args): + args = (sys.executable,) + args + # quoting arguments if windows + if sys.platform == 'win32': + def quote(arg): + if ' ' in arg: + return '"%s"' % arg + return arg + args = [quote(arg) for arg in args] + return os.spawnl(os.P_WAIT, sys.executable, *args) == 0 + +DEFAULT_VERSION = "0.6.6" +DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/" +SETUPTOOLS_PKG_INFO = """\ +Metadata-Version: 1.0 +Name: setuptools +Version: 0.6c9 +Summary: xxxx +Home-page: xxx +Author: xxx +Author-email: xxx +License: xxx +Description: xxx +""" + + +def _install(tarball): + # extracting the tarball + tmpdir = tempfile.mkdtemp() + log.warn('Extracting in %s', tmpdir) + old_wd = os.getcwd() + try: + os.chdir(tmpdir) + tar = tarfile.open(tarball) + _extractall(tar) + tar.close() + + # going in the directory + subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) + os.chdir(subdir) + log.warn('Now working in %s', subdir) + + # installing + log.warn('Installing Distribute') + assert _python_cmd('setup.py', 'install') + finally: + os.chdir(old_wd) + + +def _build_egg(tarball, to_dir): + # extracting the tarball + tmpdir = tempfile.mkdtemp() + log.warn('Extracting in %s', tmpdir) + old_wd = os.getcwd() + try: + os.chdir(tmpdir) + tar = tarfile.open(tarball) + _extractall(tar) + tar.close() + + # going in the directory + subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) + os.chdir(subdir) + log.warn('Now working in %s', subdir) + + # building an egg + log.warn('Building a Distribute egg in %s', to_dir) + _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) + + # returning the result + for file in os.listdir(to_dir): + if fnmatch.fnmatch(file, 'distribute-%s*.egg' % DEFAULT_VERSION): + return os.path.join(to_dir, file) + + raise IOError('Could not build the egg.') + finally: + os.chdir(old_wd) + + +def _do_download(version, download_base, to_dir, download_delay): + tarball = download_setuptools(version, download_base, + to_dir, 
download_delay) + egg = _build_egg(tarball, to_dir) + sys.path.insert(0, egg) + import setuptools + setuptools.bootstrap_install_from = egg + + +def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, + to_dir=os.curdir, download_delay=15, no_fake=False): + # making sure we use the absolute path + to_dir = os.path.abspath(to_dir) + was_imported = 'pkg_resources' in sys.modules or \ + 'setuptools' in sys.modules + try: + try: + import pkg_resources + if not hasattr(pkg_resources, '_distribute'): + if not no_fake: + fake_setuptools() + raise ImportError + except ImportError: + return _do_download(version, download_base, to_dir, download_delay) + try: + pkg_resources.require("distribute>="+version) + return + except pkg_resources.VersionConflict: + e = sys.exc_info()[1] + if was_imported: + sys.stderr.write( + "The required version of distribute (>=%s) is not available,\n" + "and can't be installed while this script is running. Please\n" + "install a more recent version first, using\n" + "'easy_install -U distribute'." + "\n\n(Currently using %r)\n" % (version, e.args[0])) + sys.exit(2) + else: + del pkg_resources, sys.modules['pkg_resources'] # reload ok + return _do_download(version, download_base, to_dir, + download_delay) + except pkg_resources.DistributionNotFound: + return _do_download(version, download_base, to_dir, + download_delay) + finally: + _create_fake_setuptools_pkg_info(to_dir) + +def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, + to_dir=os.curdir, delay=15): + """Download distribute from a specified location and return its filename + + `version` should be a valid distribute version number that is available + as an egg for download under the `download_base` URL (which should end + with a '/'). `to_dir` is the directory where the egg will be downloaded. + `delay` is the number of seconds to pause before an actual download + attempt. + """ + # making sure we use the absolute path + to_dir = os.path.abspath(to_dir) + try: + from urllib.request import urlopen + except ImportError: + from urllib2 import urlopen + tgz_name = "distribute-%s.tar.gz" % version + url = download_base + tgz_name + saveto = os.path.join(to_dir, tgz_name) + src = dst = None + if not os.path.exists(saveto): # Avoid repeated downloads + try: + log.warn("Downloading %s", url) + src = urlopen(url) + # Read/write all in one block, so we don't create a corrupt file + # if the download is interrupted. 
+ data = src.read() + dst = open(saveto, "wb") + dst.write(data) + finally: + if src: + src.close() + if dst: + dst.close() + return os.path.realpath(saveto) + + +def _patch_file(path, content): + """Will backup the file then patch it""" + existing_content = open(path).read() + if existing_content == content: + # already patched + log.warn('Already patched.') + return False + log.warn('Patching...') + _rename_path(path) + f = open(path, 'w') + try: + f.write(content) + finally: + f.close() + return True + + +def _same_content(path, content): + return open(path).read() == content + + +def _rename_path(path): + new_name = path + '.OLD.%s' % time.time() + log.warn('Renaming %s into %s', path, new_name) + try: + from setuptools.sandbox import DirectorySandbox + def _violation(*args): + pass + DirectorySandbox._violation = _violation + except ImportError: + pass + + os.rename(path, new_name) + return new_name + + +def _remove_flat_installation(placeholder): + if not os.path.isdir(placeholder): + log.warn('Unkown installation at %s', placeholder) + return False + found = False + for file in os.listdir(placeholder): + if fnmatch.fnmatch(file, 'setuptools*.egg-info'): + found = True + break + if not found: + log.warn('Could not locate setuptools*.egg-info') + return + + log.warn('Removing elements out of the way...') + pkg_info = os.path.join(placeholder, file) + if os.path.isdir(pkg_info): + patched = _patch_egg_dir(pkg_info) + else: + patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO) + + if not patched: + log.warn('%s already patched.', pkg_info) + return False + # now let's move the files out of the way + for element in ('setuptools', 'pkg_resources.py', 'site.py'): + element = os.path.join(placeholder, element) + if os.path.exists(element): + _rename_path(element) + else: + log.warn('Could not find the %s element of the ' + 'Setuptools distribution', element) + return True + + +def _after_install(dist): + log.warn('After install bootstrap.') + placeholder = dist.get_command_obj('install').install_purelib + _create_fake_setuptools_pkg_info(placeholder) + +def _create_fake_setuptools_pkg_info(placeholder): + if not placeholder or not os.path.exists(placeholder): + log.warn('Could not find the install location') + return + pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1]) + setuptools_file = 'setuptools-0.6c9-py%s.egg-info' % pyver + pkg_info = os.path.join(placeholder, setuptools_file) + if os.path.exists(pkg_info): + log.warn('%s already exists', pkg_info) + return + log.warn('Creating %s', pkg_info) + f = open(pkg_info, 'w') + try: + f.write(SETUPTOOLS_PKG_INFO) + finally: + f.close() + pth_file = os.path.join(placeholder, 'setuptools.pth') + log.warn('Creating %s', pth_file) + f = open(pth_file, 'w') + try: + f.write(os.path.join(os.curdir, setuptools_file)) + finally: + f.close() + + +def _patch_egg_dir(path): + # let's check if it's already patched + pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO') + if os.path.exists(pkg_info): + if _same_content(pkg_info, SETUPTOOLS_PKG_INFO): + log.warn('%s already patched.', pkg_info) + return False + _rename_path(path) + os.mkdir(path) + os.mkdir(os.path.join(path, 'EGG-INFO')) + pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO') + f = open(pkg_info, 'w') + try: + f.write(SETUPTOOLS_PKG_INFO) + finally: + f.close() + return True + + +def _before_install(): + log.warn('Before install bootstrap.') + fake_setuptools() + + +def _under_prefix(location): + if 'install' not in sys.argv: + return True + args = 
sys.argv[sys.argv.index('install')+1:] + for index, arg in enumerate(args): + for option in ('--root', '--prefix'): + if arg.startswith('%s=' % option): + top_dir = arg.split('root=')[-1] + return location.startswith(top_dir) + elif arg == option: + if len(args) > index: + top_dir = args[index+1] + return location.startswith(top_dir) + elif option == '--user' and USER_SITE is not None: + return location.startswith(USER_SITE) + return True + + +def fake_setuptools(): + log.warn('Scanning installed packages') + try: + import pkg_resources + except ImportError: + # we're cool + log.warn('Setuptools or Distribute does not seem to be installed.') + return + ws = pkg_resources.working_set + try: + setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools', + replacement=False)) + except TypeError: + # old distribute API + setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools')) + + if setuptools_dist is None: + log.warn('No setuptools distribution found') + return + # detecting if it was already faked + setuptools_location = setuptools_dist.location + log.warn('Setuptools installation detected at %s', setuptools_location) + + # if --root or --preix was provided, and if + # setuptools is not located in them, we don't patch it + if not _under_prefix(setuptools_location): + log.warn('Not patching, --root or --prefix is installing Distribute' + ' in another location') + return + + # let's see if its an egg + if not setuptools_location.endswith('.egg'): + log.warn('Non-egg installation') + res = _remove_flat_installation(setuptools_location) + if not res: + return + else: + log.warn('Egg installation') + pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO') + if (os.path.exists(pkg_info) and + _same_content(pkg_info, SETUPTOOLS_PKG_INFO)): + log.warn('Already patched.') + return + log.warn('Patching...') + # let's create a fake egg replacing setuptools one + res = _patch_egg_dir(setuptools_location) + if not res: + return + log.warn('Patched done.') + _relaunch() + + +def _relaunch(): + log.warn('Relaunching...') + # we have to relaunch the process + args = [sys.executable] + sys.argv + sys.exit(subprocess.call(args)) + + +def _extractall(self, path=".", members=None): + """Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. `members' is optional and must be a subset of the + list returned by getmembers(). + """ + import copy + import operator + from tarfile import ExtractError + directories = [] + + if members is None: + members = self + + for tarinfo in members: + if tarinfo.isdir(): + # Extract directories with a safe mode. + directories.append(tarinfo) + tarinfo = copy.copy(tarinfo) + tarinfo.mode = 448 # decimal for oct 0700 + self.extract(tarinfo, path) + + # Reverse sort directories. + if sys.version_info < (2, 4): + def sorter(dir1, dir2): + return cmp(dir1.name, dir2.name) + directories.sort(sorter) + directories.reverse() + else: + directories.sort(key=operator.attrgetter('name'), reverse=True) + + # Set correct owner, mtime and filemode on directories. 
+ for tarinfo in directories: + dirpath = os.path.join(path, tarinfo.name) + try: + self.chown(tarinfo, dirpath) + self.utime(tarinfo, dirpath) + self.chmod(tarinfo, dirpath) + except ExtractError: + e = sys.exc_info()[1] + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + +def main(argv, version=DEFAULT_VERSION): + """Install or upgrade setuptools and EasyInstall""" + tarball = download_setuptools() + _install(tarball) + + +if __name__ == '__main__': + main(sys.argv[1:]) --- a/example/execnet/popen_read_multiple.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -example - -reading results from possibly blocking code running in sub processes. -""" -import py - -NUM_PROCESSES = 5 - -channels = [] -for i in range(NUM_PROCESSES): - gw = execnet.PopenGateway() # or use SSH or socket gateways - channel = gw.remote_exec(""" - import time - secs = channel.receive() - time.sleep(secs) - channel.send("waited %d secs" % secs) - """) - channels.append(channel) - print "*** instantiated subprocess", gw - -mc = execnet.MultiChannel(channels) -queue = mc.make_receive_queue() - -print "***", "verifying that timeout on receiving results from blocked subprocesses works" -try: - queue.get(timeout=1.0) -except Exception: - pass - -print "*** sending subprocesses some data to have them unblock" -mc.send_each(1) - -print "*** receiving results asynchronously" -for i in range(NUM_PROCESSES): - channel, result = queue.get(timeout=2.0) - print "result", channel.gateway, result --- a/example/execnet/sysinfo.py +++ /dev/null @@ -1,139 +0,0 @@ -""" -sysinfo.py [host1] [host2] [options] - -obtain system info from remote machine. -""" - -import py -import sys - - -parser = py.std.optparse.OptionParser(usage=__doc__) -parser.add_option("-f", "--sshconfig", action="store", dest="ssh_config", default=None, - help="use given ssh config file, and add info all contained hosts for getting info") -parser.add_option("-i", "--ignore", action="store", dest="ignores", default=None, - help="ignore hosts (useful if the list of hostnames come from a file list)") - -def parsehosts(path): - path = py.path.local(path) - l = [] - rex = py.std.re.compile(r'Host\s*(\S+)') - for line in path.readlines(): - m = rex.match(line) - if m is not None: - sshname, = m.groups() - l.append(sshname) - return l - -class RemoteInfo: - def __init__(self, gateway): - self.gw = gateway - self._cache = {} - - def exreceive(self, execstring): - if execstring not in self._cache: - channel = self.gw.remote_exec(execstring) - self._cache[execstring] = channel.receive() - return self._cache[execstring] - - def getmodattr(self, modpath): - module = modpath.split(".")[0] - return self.exreceive(""" - import %s - channel.send(%s) - """ %(module, modpath)) - - def islinux(self): - return self.getmodattr('sys.platform').find("linux") != -1 - - def getfqdn(self): - return self.exreceive(""" - import socket - channel.send(socket.getfqdn()) - """) - - def getmemswap(self): - if self.islinux(): - return self.exreceive(""" - import commands, re - out = commands.getoutput("free") - mem = re.search(r"Mem:\s+(\S*)", out).group(1) - swap = re.search(r"Swap:\s+(\S*)", out).group(1) - channel.send((mem, swap)) - """) - - def getcpuinfo(self): - if self.islinux(): - return self.exreceive(""" - # a hyperthreaded cpu core only counts as 1, although it - # is present as 2 in /proc/cpuinfo. Counting it as 2 is - # misleading because it is *by far* not as efficient as - # two independent cores. 
- cpus = {} - cpuinfo = {} - f = open("/proc/cpuinfo") - lines = f.readlines() - f.close() - for line in lines + ['']: - if line.strip(): - key, value = line.split(":", 1) - cpuinfo[key.strip()] = value.strip() - else: - corekey = (cpuinfo.get("physical id"), - cpuinfo.get("core id")) - cpus[corekey] = 1 - numcpus = len(cpus) - model = cpuinfo.get("model name") - channel.send((numcpus, model)) - """) - -def debug(*args): - print >>sys.stderr, " ".join(map(str, args)) -def error(*args): - debug("ERROR", args[0] + ":", *args[1:]) - -def getinfo(sshname, ssh_config=None, loginfo=sys.stdout): - debug("connecting to", sshname) - try: - gw = execnet.SshGateway(sshname, ssh_config=ssh_config) - except IOError: - error("could not get sshagteway", sshname) - else: - ri = RemoteInfo(gw) - #print "%s info:" % sshname - prefix = sshname.upper() + " " - print >>loginfo, prefix, "fqdn:", ri.getfqdn() - for attr in ( - "sys.platform", - "sys.version_info", - ): - loginfo.write("%s %s: " %(prefix, attr,)) - loginfo.flush() - value = ri.getmodattr(attr) - loginfo.write(str(value)) - loginfo.write("\n") - loginfo.flush() - memswap = ri.getmemswap() - if memswap: - mem,swap = memswap - print >>loginfo, prefix, "Memory:", mem, "Swap:", swap - cpuinfo = ri.getcpuinfo() - if cpuinfo: - numcpu, model = cpuinfo - print >>loginfo, prefix, "number of cpus:", numcpu - print >>loginfo, prefix, "cpu model", model - return ri - -if __name__ == '__main__': - options, args = parser.parse_args() - hosts = list(args) - ssh_config = options.ssh_config - if ssh_config: - hosts.extend(parsehosts(ssh_config)) - ignores = options.ignores or () - if ignores: - ignores = ignores.split(",") - for host in hosts: - if host not in ignores: - getinfo(host, ssh_config=ssh_config) - --- a/example/execnet/svn-sync-repo.py +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python - -import py -import sys, os - -def usage(): - arg0 = sys.argv[0] - print """%s [user@]remote-host:/repo/location localrepo [identity keyfile]""" % (arg0,) - - -def main(args): - remote = args[0] - localrepo = py.path.local(args[1]) - if not localrepo.check(dir=1): - raise SystemExit("localrepo %s does not exist" %(localrepo,)) - if len(args) == 3: - keyfile = py.path.local(args[2]) - else: - keyfile = None - remote_host, path = remote.split(':', 1) - print "ssh-connecting to", remote_host - gw = getgateway(remote_host, keyfile) - - local_rev = get_svn_youngest(localrepo) - - # local protocol - # 1. client sends rev/repo -> server - # 2. server checks for newer revisions and sends dumps - # 3. client receives dumps, updates local repo - # 4. 
client goes back to step 1 - c = gw.remote_exec(""" - import py - import os - remote_rev, repopath = channel.receive() - while 1: - rev = py.process.cmdexec('svnlook youngest "%s"' % repopath) - rev = int(rev) - if rev > remote_rev: - revrange = (remote_rev+1, rev) - dumpchannel = channel.gateway.newchannel() - channel.send(revrange) - channel.send(dumpchannel) - - f = os.popen( - "svnadmin dump -q --incremental -r %s:%s %s" - % (revrange[0], revrange[1], repopath), 'r') - try: - while 1: - s = f.read(8192) - if not s: - raise EOFError - dumpchannel.send(s) - except EOFError: - dumpchannel.close() - remote_rev = rev - else: - # using svn-hook instead would be nice here - py.std.time.sleep(30) - """) - - c.send((local_rev, path)) - print "checking revisions from %d in %s" %(local_rev, remote) - while 1: - revstart, revend = c.receive() - dumpchannel = c.receive() - - print "receiving revisions", revstart, "-", revend, "replaying..." - svn_load(localrepo, dumpchannel) - print "current revision", revend - -def svn_load(repo, dumpchannel): - f = os.popen("svnadmin load -q %s" %(repo, ), "w") - for x in dumpchannel: - sys.stdout.write(".") - sys.stdout.flush() - f.write(x) - print >>sys.stdout - f.close() - -def get_svn_youngest(repo): - rev = py.process.cmdexec('svnlook youngest "%s"' % repo) - return int(rev) - -def getgateway(host, keyfile=None): - return execnet.SshGateway(host, identity=keyfile) - -if __name__ == '__main__': - if len(sys.argv) < 3: - usage() - raise SystemExit(1) - - main(sys.argv[1:]) - --- a/bin-for-dist/test_install.py +++ b/bin-for-dist/test_install.py @@ -1,6 +1,7 @@ import py import subprocess import os +import execnet # --- a/setup.py +++ b/setup.py @@ -1,15 +1,19 @@ """py lib / py.test setup.py file""" import os, sys +if sys.version_info >= (3,0): + from distribute_setup import use_setuptools + use_setuptools() from setuptools import setup + long_description = """ - py.test and pylib: rapid testing and development utils - `py.test`_: cross-project testing tool with many advanced features - `py.path`_: path abstractions over local and subversion files - `py.code`_: dynamic code compile and traceback printing support -Compatibility: Linux, Win32, OSX, Python versions 2.4 through to 3.1. +Platforms: Linux, Win32, OSX +Interpreters: Python versions 2.4 through to 3.1, Jython 2.5.1. For questions please check out http://pylib.org/contact.html .. _`py.test`: http://pylib.org/test.html @@ -17,7 +21,6 @@ For questions please check out http://py .. 
_`py.code`: http://pylib.org/code.html (c) Holger Krekel and others, 2009 - """ trunk = None def main(): @@ -58,30 +61,12 @@ def main(): '_py.io', '_py.log', '_py.path', - '_py.path.gateway', '_py.process', '_py.test', '_py.test.dist', '_py.test.looponfail', '_py.test.plugin',], - package_data={'py': ['bin/_findpy.py', - 'bin/env.cmd', - 'bin/env.py', - 'bin/py.cleanup', - 'bin/py.convert_unittest', - 'bin/py.countloc', - 'bin/py.lookup', - 'bin/py.svnwcrevert', - 'bin/py.test', - 'bin/py.which', - 'bin/win32/py.cleanup.cmd', - 'bin/win32/py.convert_unittest.cmd', - 'bin/win32/py.countloc.cmd', - 'bin/win32/py.lookup.cmd', - 'bin/win32/py.svnwcrevert.cmd', - 'bin/win32/py.test.cmd', - 'bin/win32/py.which.cmd',], - zip_safe=True, + zip_safe=False, ) if __name__ == '__main__': From commits-noreply at bitbucket.org Thu Oct 29 18:09:46 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 29 Oct 2009 17:09:46 +0000 (UTC) Subject: [py-svn] py-trunk commit cb903127b5c0: move examples to doc directory Message-ID: <20091029170946.9E9257EFE5@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256835277 -3600 # Node ID cb903127b5c073e41ea3f82c589a097a873ac800 # Parent f0bb4c025c85966f7acec860239eb7df19fb38b9 move examples to doc directory --- a/example/genhtmlcss.py +++ /dev/null @@ -1,23 +0,0 @@ -import py -html = py.xml.html - -class my(html): - "a custom style" - class body(html.body): - style = html.Style(font_size = "120%") - - class h2(html.h2): - style = html.Style(background = "grey") - - class p(html.p): - style = html.Style(font_weight="bold") - -doc = my.html( - my.head(), - my.body( - my.h2("hello world"), - my.p("bold as bold can") - ) -) - -print doc.unicode(indent=2) --- a/example/assertion/test_failures.py +++ /dev/null @@ -1,14 +0,0 @@ - -import py -failure_demo = py.magic.autopath().dirpath('failure_demo.py') - -pytest_plugins = "pytest_pytester" - -def test_failure_demo_fails_properly(testdir): - reprec = testdir.inline_run(failure_demo) - passed, skipped, failed = reprec.countoutcomes() - assert passed == 0 - assert failed == 20, failed - colreports = reprec.getreports("pytest_collectreport") - failed = len([x.failed for x in colreports]) - assert failed == 4 --- /dev/null +++ b/doc/example/genxml.py @@ -0,0 +1,17 @@ + +import py +class ns(py.xml.Namespace): + pass + +doc = ns.books( + ns.book( + ns.author("May Day"), + ns.title("python for java programmers"),), + ns.book( + ns.author("why", class_="somecssclass"), + ns.title("Java for Python programmers"),), + publisher="N.N", + ) +print doc.unicode(indent=2).encode('utf8') + + --- a/example/assertion/global_testmodule_config/test_hello.py +++ /dev/null @@ -1,5 +0,0 @@ - -hello = "world" - -def test_func(): - pass --- a/example/genhtml.py +++ /dev/null @@ -1,13 +0,0 @@ -from py.xml import html - -paras = "First Para", "Second para" - -doc = html.html( - html.head( - html.meta(name="Content-Type", value="text/html; charset=latin1")), - html.body( - [html.p(p) for p in paras])) - -print unicode(doc).encode('latin1') - - --- /dev/null +++ b/doc/example/funcarg/mysetup2/test_sample.py @@ -0,0 +1,6 @@ + +def test_answer(mysetup): + app = mysetup.myapp() + answer = app.question() + assert answer == 42 + --- a/MANIFEST.in +++ b/MANIFEST.in @@ -5,7 +5,6 @@ include distribute_setup.py include LICENSE graft doc graft contrib -graft example graft bin exclude *.orig exclude *.rej --- 
a/example/assertion/global_testmodule_config/conftest.py +++ /dev/null @@ -1,7 +0,0 @@ -import py - -def pytest_runtest_setup(item): - if isinstance(item, py.test.collect.Function): - mod = item.getparent(py.test.collect.Module).obj - if hasattr(mod, 'hello'): - py.builtin.print_("mod.hello", mod.hello) --- /dev/null +++ b/doc/example/funcarg/mysetup2/conftest.py @@ -0,0 +1,24 @@ +import py +from mysetup2.myapp import MyApp + +def pytest_funcarg__mysetup(request): + return MySetup(request) + +def pytest_addoption(parser): + parser.addoption("--ssh", action="store", default=None, + help="specify ssh host to run tests with") + + +class MySetup: + def __init__(self, request): + self.config = request.config + + def myapp(self): + return MyApp() + + def getsshconnection(self): + host = self.config.option.ssh + if host is None: + py.test.skip("specify ssh host with --ssh") + return execnet.SshGateway(host) + --- /dev/null +++ b/doc/example/funcarg/conftest.py @@ -0,0 +1,3 @@ +import py + +collect_ignore = 'mysetup', 'mysetup2', 'test_simpleprovider.py', 'parametrize' --- a/example/funcarg/costlysetup/sub1/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# --- a/example/funcarg/urloption/conftest.py +++ /dev/null @@ -1,15 +0,0 @@ -# conftest.py -import py - - -def pytest_addoption(parser): - grp = parser.getgroup("testserver options") - grp.addoption("--url", action="store", default=None, - help="url for testserver") - -def pytest_funcarg__url(request): - url = request.config.getvalue("url") - if url is None: - py.test.skip("need --url") - return url - --- /dev/null +++ b/doc/example/funcarg/costlysetup/sub1/test_quick.py @@ -0,0 +1,3 @@ + +def test_quick(): + pass --- a/example/funcarg/costlysetup/sub2/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# --- /dev/null +++ b/doc/example/genhtml.py @@ -0,0 +1,13 @@ +from py.xml import html + +paras = "First Para", "Second para" + +doc = html.html( + html.head( + html.meta(name="Content-Type", value="text/html; charset=latin1")), + html.body( + [html.p(p) for p in paras])) + +print unicode(doc).encode('latin1') + + --- a/example/assertion/test_setup_flow_example.py +++ /dev/null @@ -1,42 +0,0 @@ -def setup_module(module): - module.TestStateFullThing.classcount = 0 - -class TestStateFullThing: - def setup_class(cls): - cls.classcount += 1 - - def teardown_class(cls): - cls.classcount -= 1 - - def setup_method(self, method): - self.id = eval(method.__name__[5:]) - - def test_42(self): - assert self.classcount == 1 - assert self.id == 42 - - def test_23(self): - assert self.classcount == 1 - assert self.id == 23 - -def teardown_module(module): - assert module.TestStateFullThing.classcount == 0 - -""" For this example the control flow happens as follows:: - import test_setup_flow_example - setup_module(test_setup_flow_example) - setup_class(TestStateFullThing) - instance = TestStateFullThing() - setup_method(instance, instance.test_42) - instance.test_42() - setup_method(instance, instance.test_23) - instance.test_23() - teardown_class(TestStateFullThing) - teardown_module(test_setup_flow_example) - -Note that ``setup_class(TestStateFullThing)`` is called and not -``TestStateFullThing.setup_class()`` which would require you -to insert ``setup_class = classmethod(setup_class)`` to make -your setup function callable. 
-""" - --- /dev/null +++ b/doc/example/funcarg/parametrize/test_parametrize.py @@ -0,0 +1,17 @@ +import py + +def pytest_generate_tests(metafunc): + for funcargs in metafunc.cls.params[metafunc.function.__name__]: + metafunc.addcall(funcargs=funcargs) + +class TestClass: + params = { + 'test_equals': [dict(a=1, b=2), dict(a=3, b=3), dict(a=5, b=4)], + 'test_zerodivision': [dict(a=1, b=0), dict(a=3, b=2)], + } + + def test_equals(self, a, b): + assert a == b + + def test_zerodivision(self, a, b): + py.test.raises(ZeroDivisionError, "a/b") --- /dev/null +++ b/doc/example/funcarg/parametrize/test_parametrize3.py @@ -0,0 +1,15 @@ + +# following hook can be put unchanged into a local or global plugin +def pytest_generate_tests(metafunc): + for scenario in metafunc.cls.scenarios: + metafunc.addcall(id=scenario[0], funcargs=scenario[1]) + + +scenario1 = ('basic', {'attribute': 'value'}) +scenario2 = ('advanced', {'attribute': 'value2'}) + +class TestSampleWithScenarios: + scenarios = [scenario1, scenario2] + + def test_demo(self, attribute): + assert isinstance(attribute, str) --- /dev/null +++ b/doc/example/funcarg/test_simpleprovider.py @@ -0,0 +1,7 @@ +# ./test_simpleprovider.py +def pytest_funcarg__myfuncarg(request): + return 42 + +def test_function(myfuncarg): + assert myfuncarg == 17 + --- a/example/funcarg/mysetup2/myapp.py +++ /dev/null @@ -1,5 +0,0 @@ - -class MyApp: - def question(self): - return 6 * 9 - --- /dev/null +++ b/doc/example/assertion/global_testmodule_config/test_hello.py @@ -0,0 +1,5 @@ + +hello = "world" + +def test_func(): + pass --- /dev/null +++ b/doc/example/genhtmlcss.py @@ -0,0 +1,23 @@ +import py +html = py.xml.html + +class my(html): + "a custom style" + class body(html.body): + style = html.Style(font_size = "120%") + + class h2(html.h2): + style = html.Style(background = "grey") + + class p(html.p): + style = html.Style(font_weight="bold") + +doc = my.html( + my.head(), + my.body( + my.h2("hello world"), + my.p("bold as bold can") + ) +) + +print doc.unicode(indent=2) --- a/example/funcarg/parametrize/test_parametrize.py +++ /dev/null @@ -1,17 +0,0 @@ -import py - -def pytest_generate_tests(metafunc): - for funcargs in metafunc.cls.params[metafunc.function.__name__]: - metafunc.addcall(funcargs=funcargs) - -class TestClass: - params = { - 'test_equals': [dict(a=1, b=2), dict(a=3, b=3), dict(a=5, b=4)], - 'test_zerodivision': [dict(a=1, b=0), dict(a=3, b=2)], - } - - def test_equals(self, a, b): - assert a == b - - def test_zerodivision(self, a, b): - py.test.raises(ZeroDivisionError, "a/b") --- /dev/null +++ b/doc/example/funcarg/test_multi_python.py @@ -0,0 +1,65 @@ +""" + +module containing a parametrized tests testing cross-python +serialization via the pickle module. 
+""" +import py + +pythonlist = ['python2.3', 'python2.4', 'python2.5', 'python2.6'] +# 'jython' 'python3.1'] + +def pytest_generate_tests(metafunc): + if 'python1' in metafunc.funcargnames: + assert 'python2' in metafunc.funcargnames + for obj in metafunc.function.multiarg.obj: + for py1 in pythonlist: + for py2 in pythonlist: + metafunc.addcall(id="%s-%s-%s" % (py1, py2, obj), + param=(py1, py2, obj)) + + at py.test.mark.multiarg(obj=[42, {}, {1:3},]) +def test_basic_objects(python1, python2, obj): + python1.dumps(obj) + python2.load_and_is_true("obj == %s" % obj) + +def pytest_funcarg__python1(request): + tmpdir = request.getfuncargvalue("tmpdir") + picklefile = tmpdir.join("data.pickle") + return Python(request.param[0], picklefile) + +def pytest_funcarg__python2(request): + python1 = request.getfuncargvalue("python1") + return Python(request.param[1], python1.picklefile) + +def pytest_funcarg__obj(request): + return request.param[2] + +class Python: + def __init__(self, version, picklefile): + self.pythonpath = py.path.local.sysfind(version) + if not self.pythonpath: + py.test.skip("%r not found" %(version,)) + self.picklefile = picklefile + def dumps(self, obj): + dumpfile = self.picklefile.dirpath("dump.py") + dumpfile.write(py.code.Source(""" + import pickle + f = open(%r, 'wb') + s = pickle.dump(%r, f) + f.close() + """ % (str(self.picklefile), obj))) + py.process.cmdexec("%s %s" %(self.pythonpath, dumpfile)) + + def load_and_is_true(self, expression): + loadfile = self.picklefile.dirpath("load.py") + loadfile.write(py.code.Source(""" + import pickle + f = open(%r, 'rb') + obj = pickle.load(f) + f.close() + res = eval(%r) + if not res: + raise SystemExit(1) + """ % (str(self.picklefile), expression))) + print loadfile + py.process.cmdexec("%s %s" %(self.pythonpath, loadfile)) --- /dev/null +++ b/doc/example/funcarg/costlysetup/sub2/__init__.py @@ -0,0 +1,1 @@ +# --- a/example/funcarg/conftest.py +++ /dev/null @@ -1,3 +0,0 @@ -import py - -collect_ignore = 'mysetup', 'mysetup2', 'test_simpleprovider.py', 'parametrize' --- a/example/funcarg/mysetup2/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# XXX this file should not need to be here but is here for proper sys.path mangling --- /dev/null +++ b/doc/example/funcarg/mysetup2/__init__.py @@ -0,0 +1,1 @@ +# XXX this file should not need to be here but is here for proper sys.path mangling --- /dev/null +++ b/doc/example/assertion/test_setup_flow_example.py @@ -0,0 +1,42 @@ +def setup_module(module): + module.TestStateFullThing.classcount = 0 + +class TestStateFullThing: + def setup_class(cls): + cls.classcount += 1 + + def teardown_class(cls): + cls.classcount -= 1 + + def setup_method(self, method): + self.id = eval(method.__name__[5:]) + + def test_42(self): + assert self.classcount == 1 + assert self.id == 42 + + def test_23(self): + assert self.classcount == 1 + assert self.id == 23 + +def teardown_module(module): + assert module.TestStateFullThing.classcount == 0 + +""" For this example the control flow happens as follows:: + import test_setup_flow_example + setup_module(test_setup_flow_example) + setup_class(TestStateFullThing) + instance = TestStateFullThing() + setup_method(instance, instance.test_42) + instance.test_42() + setup_method(instance, instance.test_23) + instance.test_23() + teardown_class(TestStateFullThing) + teardown_module(test_setup_flow_example) + +Note that ``setup_class(TestStateFullThing)`` is called and not +``TestStateFullThing.setup_class()`` which would require you +to insert ``setup_class = 
classmethod(setup_class)`` to make +your setup function callable. +""" + --- a/example/funcarg/costlysetup/sub1/test_quick.py +++ /dev/null @@ -1,3 +0,0 @@ - -def test_quick(): - pass --- a/example/funcarg/mysetup/myapp.py +++ /dev/null @@ -1,5 +0,0 @@ - -class MyApp: - def question(self): - return 6 * 9 - --- /dev/null +++ b/doc/example/funcarg/costlysetup/sub1/__init__.py @@ -0,0 +1,1 @@ +# --- a/example/funcarg/parametrize/test_parametrize2.py +++ /dev/null @@ -1,25 +0,0 @@ -import py - -# test support code -def params(funcarglist): - def wrapper(function): - function.funcarglist = funcarglist - return function - return wrapper - -def pytest_generate_tests(metafunc): - for funcargs in getattr(metafunc.function, 'funcarglist', ()): - metafunc.addcall(funcargs=funcargs) - - -# actual test code - -class TestClass: - @params([dict(a=1, b=2), dict(a=3, b=3), dict(a=5, b=4)], ) - def test_equals(self, a, b): - assert a == b - - @params([dict(a=1, b=0), dict(a=3, b=2)]) - def test_zerodivision(self, a, b): - py.test.raises(ZeroDivisionError, "a/b") - --- /dev/null +++ b/doc/example/funcarg/mysetup/myapp.py @@ -0,0 +1,5 @@ + +class MyApp: + def question(self): + return 6 * 9 + --- a/example/funcarg/parametrize/test_parametrize3.py +++ /dev/null @@ -1,15 +0,0 @@ - -# following hook can be put unchanged into a local or global plugin -def pytest_generate_tests(metafunc): - for scenario in metafunc.cls.scenarios: - metafunc.addcall(id=scenario[0], funcargs=scenario[1]) - - -scenario1 = ('basic', {'attribute': 'value'}) -scenario2 = ('advanced', {'attribute': 'value2'}) - -class TestSampleWithScenarios: - scenarios = [scenario1, scenario2] - - def test_demo(self, attribute): - assert isinstance(attribute, str) --- a/example/funcarg/test_simpleprovider.py +++ /dev/null @@ -1,7 +0,0 @@ -# ./test_simpleprovider.py -def pytest_funcarg__myfuncarg(request): - return 42 - -def test_function(myfuncarg): - assert myfuncarg == 17 - --- a/example/funcarg/costlysetup/sub2/test_two.py +++ /dev/null @@ -1,6 +0,0 @@ -def test_something(setup): - assert setup.timecostly == 1 - -def test_something_more(setup): - assert setup.timecostly == 1 - --- a/example/funcarg/mysetup/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# XXX this file should not need to be here but is here for proper sys.path mangling --- /dev/null +++ b/doc/example/assertion/test_failures.py @@ -0,0 +1,14 @@ + +import py +failure_demo = py.magic.autopath().dirpath('failure_demo.py') + +pytest_plugins = "pytest_pytester" + +def test_failure_demo_fails_properly(testdir): + reprec = testdir.inline_run(failure_demo) + passed, skipped, failed = reprec.countoutcomes() + assert passed == 0 + assert failed == 20, failed + colreports = reprec.getreports("pytest_collectreport") + failed = len([x.failed for x in colreports]) + assert failed == 4 --- a/example/funcarg/test_multi_python.py +++ /dev/null @@ -1,65 +0,0 @@ -""" - -module containing a parametrized tests testing cross-python -serialization via the pickle module. 
-""" -import py - -pythonlist = ['python2.3', 'python2.4', 'python2.5', 'python2.6'] -# 'jython' 'python3.1'] - -def pytest_generate_tests(metafunc): - if 'python1' in metafunc.funcargnames: - assert 'python2' in metafunc.funcargnames - for obj in metafunc.function.multiarg.obj: - for py1 in pythonlist: - for py2 in pythonlist: - metafunc.addcall(id="%s-%s-%s" % (py1, py2, obj), - param=(py1, py2, obj)) - - at py.test.mark.multiarg(obj=[42, {}, {1:3},]) -def test_basic_objects(python1, python2, obj): - python1.dumps(obj) - python2.load_and_is_true("obj == %s" % obj) - -def pytest_funcarg__python1(request): - tmpdir = request.getfuncargvalue("tmpdir") - picklefile = tmpdir.join("data.pickle") - return Python(request.param[0], picklefile) - -def pytest_funcarg__python2(request): - python1 = request.getfuncargvalue("python1") - return Python(request.param[1], python1.picklefile) - -def pytest_funcarg__obj(request): - return request.param[2] - -class Python: - def __init__(self, version, picklefile): - self.pythonpath = py.path.local.sysfind(version) - if not self.pythonpath: - py.test.skip("%r not found" %(version,)) - self.picklefile = picklefile - def dumps(self, obj): - dumpfile = self.picklefile.dirpath("dump.py") - dumpfile.write(py.code.Source(""" - import pickle - f = open(%r, 'wb') - s = pickle.dump(%r, f) - f.close() - """ % (str(self.picklefile), obj))) - py.process.cmdexec("%s %s" %(self.pythonpath, dumpfile)) - - def load_and_is_true(self, expression): - loadfile = self.picklefile.dirpath("load.py") - loadfile.write(py.code.Source(""" - import pickle - f = open(%r, 'rb') - obj = pickle.load(f) - f.close() - res = eval(%r) - if not res: - raise SystemExit(1) - """ % (str(self.picklefile), expression))) - print loadfile - py.process.cmdexec("%s %s" %(self.pythonpath, loadfile)) --- /dev/null +++ b/doc/example/funcarg/mysetup/test_sample.py @@ -0,0 +1,5 @@ + +def test_answer(mysetup): + app = mysetup.myapp() + answer = app.question() + assert answer == 42 --- /dev/null +++ b/doc/example/funcarg/urloption/conftest.py @@ -0,0 +1,15 @@ +# conftest.py +import py + + +def pytest_addoption(parser): + grp = parser.getgroup("testserver options") + grp.addoption("--url", action="store", default=None, + help="url for testserver") + +def pytest_funcarg__url(request): + url = request.config.getvalue("url") + if url is None: + py.test.skip("need --url") + return url + --- /dev/null +++ b/doc/example/funcarg/parametrize/test_parametrize2.py @@ -0,0 +1,25 @@ +import py + +# test support code +def params(funcarglist): + def wrapper(function): + function.funcarglist = funcarglist + return function + return wrapper + +def pytest_generate_tests(metafunc): + for funcargs in getattr(metafunc.function, 'funcarglist', ()): + metafunc.addcall(funcargs=funcargs) + + +# actual test code + +class TestClass: + @params([dict(a=1, b=2), dict(a=3, b=3), dict(a=5, b=4)], ) + def test_equals(self, a, b): + assert a == b + + @params([dict(a=1, b=0), dict(a=3, b=2)]) + def test_zerodivision(self, a, b): + py.test.raises(ZeroDivisionError, "a/b") + --- /dev/null +++ b/doc/example/assertion/failure_demo.py @@ -0,0 +1,122 @@ +from py.test import raises +import py + +def otherfunc(a,b): + assert a==b + +def somefunc(x,y): + otherfunc(x,y) + +def otherfunc_multi(a,b): + assert (a == + b) + +def test_generative(param1, param2): + assert param1 * 2 < param2 + +def pytest_generate_tests(metafunc): + if 'param1' in metafunc.funcargnames: + metafunc.addcall(funcargs=dict(param1=3, param2=6)) + +class TestFailing(object): + def 
test_simple(self): + def f(): + return 42 + def g(): + return 43 + + assert f() == g() + + def test_simple_multiline(self): + otherfunc_multi( + 42, + 6*9) + + def test_not(self): + def f(): + return 42 + assert not f() + + def test_complex_error(self): + def f(): + return 44 + def g(): + return 43 + somefunc(f(), g()) + + def test_z1_unpack_error(self): + l = [] + a,b = l + + def test_z2_type_error(self): + l = 3 + a,b = l + + def test_startswith(self): + s = "123" + g = "456" + assert s.startswith(g) + + def test_startswith_nested(self): + def f(): + return "123" + def g(): + return "456" + assert f().startswith(g()) + + def test_global_func(self): + assert isinstance(globf(42), float) + + def test_instance(self): + self.x = 6*7 + assert self.x != 42 + + def test_compare(self): + assert globf(10) < 5 + + def test_try_finally(self): + x = 1 + try: + assert x == 0 + finally: + x = 0 + + def test_raises(self): + s = 'qwe' + raises(TypeError, "int(s)") + + def test_raises_doesnt(self): + raises(IOError, "int('3')") + + def test_raise(self): + raise ValueError("demo error") + + def test_tupleerror(self): + a,b = [1] + + def test_reinterpret_fails_with_print_for_the_fun_of_it(self): + l = [1,2,3] + print ("l is %r" % l) + a,b = l.pop() + + def test_some_error(self): + if namenotexi: + pass + + def func1(self): + assert 41 == 42 + + +# thanks to Matthew Scott for this test +def test_dynamic_compile_shows_nicely(): + src = 'def foo():\n assert 1 == 0\n' + name = 'abc-123' + module = py.std.imp.new_module(name) + code = py.code.compile(src, name, 'exec') + py.builtin.exec_(code, module.__dict__) + py.std.sys.modules[name] = module + module.foo() + + +def globf(x): + return x+1 --- /dev/null +++ b/doc/example/funcarg/costlysetup/conftest.py @@ -0,0 +1,16 @@ + +def pytest_funcarg__setup(request): + return request.cached_setup( + setup=lambda: CostlySetup(), + teardown=lambda costlysetup: costlysetup.finalize(), + scope="session", + ) + +class CostlySetup: + def __init__(self): + import time + time.sleep(5) + self.timecostly = 1 + + def finalize(self): + del self.timecostly --- a/example/funcarg/mysetup2/test_ssh.py +++ /dev/null @@ -1,5 +0,0 @@ - -class TestClass: - def test_function(self, mysetup): - conn = mysetup.getsshconnection() - # work with conn --- a/example/funcarg/mysetup/conftest.py +++ /dev/null @@ -1,9 +0,0 @@ - -from mysetup.myapp import MyApp - -def pytest_funcarg__mysetup(request): - return MySetup() - -class MySetup: - def myapp(self): - return MyApp() --- a/example/funcarg/mysetup2/test_sample.py +++ /dev/null @@ -1,6 +0,0 @@ - -def test_answer(mysetup): - app = mysetup.myapp() - answer = app.question() - assert answer == 42 - --- a/example/genxml.py +++ /dev/null @@ -1,17 +0,0 @@ - -import py -class ns(py.xml.Namespace): - pass - -doc = ns.books( - ns.book( - ns.author("May Day"), - ns.title("python for java programmers"),), - ns.book( - ns.author("why", class_="somecssclass"), - ns.title("Java for Python programmers"),), - publisher="N.N", - ) -print doc.unicode(indent=2).encode('utf8') - - --- /dev/null +++ b/doc/example/assertion/global_testmodule_config/conftest.py @@ -0,0 +1,7 @@ +import py + +def pytest_runtest_setup(item): + if isinstance(item, py.test.collect.Function): + mod = item.getparent(py.test.collect.Module).obj + if hasattr(mod, 'hello'): + py.builtin.print_("mod.hello", mod.hello) --- a/example/funcarg/costlysetup/conftest.py +++ /dev/null @@ -1,16 +0,0 @@ - -def pytest_funcarg__setup(request): - return request.cached_setup( - setup=lambda: CostlySetup(), - 
teardown=lambda costlysetup: costlysetup.finalize(), - scope="session", - ) - -class CostlySetup: - def __init__(self): - import time - time.sleep(5) - self.timecostly = 1 - - def finalize(self): - del self.timecostly --- /dev/null +++ b/doc/example/funcarg/mysetup/__init__.py @@ -0,0 +1,1 @@ +# XXX this file should not need to be here but is here for proper sys.path mangling --- /dev/null +++ b/doc/example/funcarg/mysetup2/test_ssh.py @@ -0,0 +1,5 @@ + +class TestClass: + def test_function(self, mysetup): + conn = mysetup.getsshconnection() + # work with conn --- /dev/null +++ b/doc/example/funcarg/mysetup2/myapp.py @@ -0,0 +1,5 @@ + +class MyApp: + def question(self): + return 6 * 9 + --- a/example/funcarg/mysetup2/conftest.py +++ /dev/null @@ -1,24 +0,0 @@ -import py -from mysetup2.myapp import MyApp - -def pytest_funcarg__mysetup(request): - return MySetup(request) - -def pytest_addoption(parser): - parser.addoption("--ssh", action="store", default=None, - help="specify ssh host to run tests with") - - -class MySetup: - def __init__(self, request): - self.config = request.config - - def myapp(self): - return MyApp() - - def getsshconnection(self): - host = self.config.option.ssh - if host is None: - py.test.skip("specify ssh host with --ssh") - return execnet.SshGateway(host) - --- /dev/null +++ b/doc/example/funcarg/costlysetup/sub2/test_two.py @@ -0,0 +1,6 @@ +def test_something(setup): + assert setup.timecostly == 1 + +def test_something_more(setup): + assert setup.timecostly == 1 + --- a/example/assertion/failure_demo.py +++ /dev/null @@ -1,122 +0,0 @@ -from py.test import raises -import py - -def otherfunc(a,b): - assert a==b - -def somefunc(x,y): - otherfunc(x,y) - -def otherfunc_multi(a,b): - assert (a == - b) - -def test_generative(param1, param2): - assert param1 * 2 < param2 - -def pytest_generate_tests(metafunc): - if 'param1' in metafunc.funcargnames: - metafunc.addcall(funcargs=dict(param1=3, param2=6)) - -class TestFailing(object): - def test_simple(self): - def f(): - return 42 - def g(): - return 43 - - assert f() == g() - - def test_simple_multiline(self): - otherfunc_multi( - 42, - 6*9) - - def test_not(self): - def f(): - return 42 - assert not f() - - def test_complex_error(self): - def f(): - return 44 - def g(): - return 43 - somefunc(f(), g()) - - def test_z1_unpack_error(self): - l = [] - a,b = l - - def test_z2_type_error(self): - l = 3 - a,b = l - - def test_startswith(self): - s = "123" - g = "456" - assert s.startswith(g) - - def test_startswith_nested(self): - def f(): - return "123" - def g(): - return "456" - assert f().startswith(g()) - - def test_global_func(self): - assert isinstance(globf(42), float) - - def test_instance(self): - self.x = 6*7 - assert self.x != 42 - - def test_compare(self): - assert globf(10) < 5 - - def test_try_finally(self): - x = 1 - try: - assert x == 0 - finally: - x = 0 - - def test_raises(self): - s = 'qwe' - raises(TypeError, "int(s)") - - def test_raises_doesnt(self): - raises(IOError, "int('3')") - - def test_raise(self): - raise ValueError("demo error") - - def test_tupleerror(self): - a,b = [1] - - def test_reinterpret_fails_with_print_for_the_fun_of_it(self): - l = [1,2,3] - print ("l is %r" % l) - a,b = l.pop() - - def test_some_error(self): - if namenotexi: - pass - - def func1(self): - assert 41 == 42 - - -# thanks to Matthew Scott for this test -def test_dynamic_compile_shows_nicely(): - src = 'def foo():\n assert 1 == 0\n' - name = 'abc-123' - module = py.std.imp.new_module(name) - code = py.code.compile(src, 
name, 'exec') - py.builtin.exec_(code, module.__dict__) - py.std.sys.modules[name] = module - module.foo() - - -def globf(x): - return x+1 --- /dev/null +++ b/doc/example/funcarg/mysetup/conftest.py @@ -0,0 +1,9 @@ + +from mysetup.myapp import MyApp + +def pytest_funcarg__mysetup(request): + return MySetup() + +class MySetup: + def myapp(self): + return MyApp() --- a/example/funcarg/mysetup/test_sample.py +++ /dev/null @@ -1,5 +0,0 @@ - -def test_answer(mysetup): - app = mysetup.myapp() - answer = app.question() - assert answer == 42 From commits-noreply at bitbucket.org Thu Oct 29 20:10:43 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 29 Oct 2009 19:10:43 +0000 (UTC) Subject: [py-svn] py-trunk commit 4427bfe18bec: fix windows32 issues, introduce a simplistic path.samefile for it, fix tests Message-ID: <20091029191043.3082383861@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256843405 -3600 # Node ID 4427bfe18bec943c132d92ff66d3a253517142ce # Parent 70276ab3a29dbaf96a3609e9ab99ab8dccf232d1 fix windows32 issues, introduce a simplistic path.samefile for it, fix tests --- a/_py/path/common.py +++ b/_py/path/common.py @@ -300,6 +300,10 @@ newline will be removed from the end of else: res.sort() + def samefile(self, other): + """ return True if other refers to the same stat object as self. """ + return self.strpath == str(other) + class FNMatcher: def __init__(self, pattern): self.pattern = pattern --- a/conftest.py +++ b/conftest.py @@ -1,6 +1,15 @@ +import py + pytest_plugins = '_pytest doctest pytester'.split() -rsyncdirs = ['conftest.py', 'py', 'doc', 'testing'] + +rsyncdirs = ['conftest.py', 'bin', 'py', 'doc', 'testing'] +try: + import execnet +except ImportError: + pass +else: + rsyncdirs.append(str(py.path.local(execnet.__file__).dirpath())) import py def pytest_addoption(parser): --- a/testing/io_/test_terminalwriter.py +++ b/testing/io_/test_terminalwriter.py @@ -42,7 +42,7 @@ def test_unicode_encoding(): l = [] tw = py.io.TerminalWriter(l.append, encoding=encoding) tw.line(msg) - assert l[0] == msg.encode(encoding) + assert l[0].strip() == msg.encode(encoding) class BaseTests: def test_line(self): --- a/testing/path/test_local.py +++ b/testing/path/test_local.py @@ -368,14 +368,19 @@ def test_homedir(): homedir = py.path.local._gethomedir() assert homedir.check(dir=1) +def test_samefile(tmpdir): + assert tmpdir.samefile(tmpdir) + p = tmpdir.ensure("hello") + assert p.samefile(p) + class TestWINLocalPath: pytestmark = py.test.mark.skipif("sys.platform != 'win32'") - def test_owner_group_not_implemented(self): + def test_owner_group_not_implemented(self, path1): py.test.raises(NotImplementedError, "path1.stat().owner") py.test.raises(NotImplementedError, "path1.stat().group") - def test_chmod_simple_int(self): + def test_chmod_simple_int(self, path1): py.builtin.print_("path1 is", path1) mode = path1.stat().mode # Ensure that we actually change the mode to something different. 
@@ -388,18 +393,18 @@ class TestWINLocalPath: path1.chmod(mode) assert path1.stat().mode == mode - def test_path_comparison_lowercase_mixed(self): + def test_path_comparison_lowercase_mixed(self, path1): t1 = path1.join("a_path") t2 = path1.join("A_path") assert t1 == t1 assert t1 == t2 - def test_relto_with_mixed_case(self): + def test_relto_with_mixed_case(self, path1): t1 = path1.join("a_path", "fiLe") t2 = path1.join("A_path") assert t1.relto(t2) == "fiLe" - def test_allow_unix_style_paths(self): + def test_allow_unix_style_paths(self, path1): t1 = path1.join('a_path') assert t1 == str(path1) + '\\a_path' t1 = path1.join('a_path/') @@ -407,7 +412,7 @@ class TestWINLocalPath: t1 = path1.join('dir/a_path') assert t1 == str(path1) + '\\dir\\a_path' - def test_sysfind_in_currentdir(self): + def test_sysfind_in_currentdir(self, path1): cmd = py.path.local.sysfind('cmd') root = cmd.new(dirname='', basename='') # c:\ in most installations old = root.chdir() @@ -420,11 +425,6 @@ class TestWINLocalPath: class TestPOSIXLocalPath: pytestmark = py.test.mark.skipif("sys.platform == 'win32'") - def test_samefile(self, tmpdir): - assert tmpdir.samefile(tmpdir) - p = tmpdir.ensure("hello") - assert p.samefile(p) - def test_hardlink(self, tmpdir): linkpath = tmpdir.join('test') filepath = tmpdir.join('file') --- a/testing/test_py_imports.py +++ b/testing/test_py_imports.py @@ -8,7 +8,8 @@ def checksubpackage(name): keys = dir(obj) assert len(keys) > 0 print (obj.__map__) - assert getattr(obj, '__map__') == {} + for name in obj.__map__: + assert hasattr(obj, name), (obj, name) def test_dir(): for name in dir(py): --- a/testing/log/test_log.py +++ b/testing/log/test_log.py @@ -118,7 +118,7 @@ class TestLogConsumer: def test_log_file(self): customlog = tempdir.join('log.out') - py.log.setconsumer("default", open(str(customlog), 'w', buffering=1)) + py.log.setconsumer("default", open(str(customlog), 'w', buffering=0)) py.log.Producer("default")("hello world #1") assert customlog.readlines() == ['[default] hello world #1\n'] --- a/_py/path/local.py +++ b/_py/path/local.py @@ -70,7 +70,7 @@ class PosixPath(common.PathBase): def samefile(self, other): """ return True if other refers to the same stat object as self. 
""" - return py.std.os.path.samefile(str(self), str(other)) + return py.error.checked_call(os.path.samefile, str(self), str(other)) def getuserid(user): import pwd From commits-noreply at bitbucket.org Thu Oct 29 23:46:37 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 29 Oct 2009 22:46:37 +0000 (UTC) Subject: [py-svn] py-trunk commit 1d5efcae2963: trying a bit harder to get a realpath for the py lib because Message-ID: <20091029224637.40D667F003@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256844609 -3600 # Node ID 1d5efcae2963bae69400a1f72fa6cd72348da4cb # Parent 4427bfe18bec943c132d92ff66d3a253517142ce trying a bit harder to get a realpath for the py lib because execnet-rsync does not support working with links --- a/_py/test/config.py +++ b/_py/test/config.py @@ -261,8 +261,8 @@ class Config(object): conftestroots = config.getconftest_pathlist("rsyncdirs") if conftestroots: roots.extend(conftestroots) - pydirs = [py.path.local(py.__file__).dirpath(), - py._impldir] + pydirs = [x.realpath() for x in [py.path.local(py.__file__).dirpath(), + py._impldir]] roots = [py.path.local(root) for root in roots] for root in roots: if not root.check(): From commits-noreply at bitbucket.org Thu Oct 29 23:46:38 2009 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 29 Oct 2009 22:46:38 +0000 (UTC) Subject: [py-svn] py-trunk commit 15fcc2ee8679: fix jython issue, flexibilize sysexec params Message-ID: <20091029224638.D6E427F004@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1256856374 -3600 # Node ID 15fcc2ee86797cf1ac3666a8dc4ec5bd89909d2a # Parent 1d5efcae2963bae69400a1f72fa6cd72348da4cb fix jython issue, flexibilize sysexec params --- a/testing/path/test_local.py +++ b/testing/path/test_local.py @@ -338,7 +338,7 @@ class TestImport: def test_pyimport_check_filepath_consistency(self, monkeypatch, tmpdir): name = 'pointsback123' - ModuleType = type(py.std.sys) + ModuleType = type(py.std.os) p = tmpdir.ensure(name + '.py') for ending in ('.pyc', '$py.class', '.pyo'): mod = ModuleType(name) --- a/_py/path/local.py +++ b/_py/path/local.py @@ -542,14 +542,15 @@ class LocalPath(FSBase): raise return mod - def sysexec(self, *argv): + def sysexec(self, *argv, **popen_opts): """ return stdout text from executing a system child process, where the 'self' path points to executable. The process is directly invoked and not through a system shell. """ from subprocess import Popen, PIPE argv = map(str, argv) - proc = Popen([str(self)] + list(argv), stdout=PIPE, stderr=PIPE) + popen_opts['stdout'] = popen_opts['stderr'] = PIPE + proc = Popen([str(self)] + list(argv), **popen_opts) stdout, stderr = proc.communicate() ret = proc.wait() if py.builtin._isbytes(stdout):