[Python-checkins] bpo-32193: Convert asyncio to async/await usage (#4753)

Andrew Svetlov webhook-mailer@python.org
Fri Dec 8 17:23:53 EST 2017


https://github.com/python/cpython/commit/5f841b553814969220b096a2b4f959b7f6fcbaf6
commit: 5f841b553814969220b096a2b4f959b7f6fcbaf6
branch: master
author: Andrew Svetlov <andrew.svetlov@gmail.com>
committer: GitHub <noreply@github.com>
date: 2017-12-09T00:23:48+02:00
summary:

bpo-32193: Convert asyncio to async/await usage (#4753)

* Convert asyncio/tasks.py to async/await

* Convert asyncio/queues.py to async/await

* Convert asyncio/test_utils.py to async/await

* Convert asyncio/base_subprocess.py to async/await

* Convert asyncio/subprocess.py to async/await

* Convert asyncio/streams.py to async/await

* Fix comments

* Convert asyncio/locks.py to async/await

* Convert asyncio.sleep to async def

* Add a comment

* Add missing news

* Convert stubs from AbstractEventLoop to async functions

* Convert subprocess_shell/subprocess_exec

* Convert connect_read_pipe/connect_write_pipe to async/await syntax

* Convert create_datagram_endpoint

* Convert create_unix_server/create_unix_connection

* Get rid of old style coroutines in unix_events.py

* Convert selector_events.py to async/await

* Convert wait_closed and create_connection

* Drop redundant line

* Convert base_events.py

* Code cleanup

* Drop redundant comments

* Fix indentation

* Add explicit tests for compatibility between old and new coroutines

* Convert windows event loop to use async/await

* Fix double awaiting of async function

* Convert asyncio/locks.py

* Improve docstring

* Convert tests to async/await

* Convert more tests

* Convert more tests

* Convert more tests

* Convert tests

* Improve test

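The conversion is mechanical throughout the diff below: functions decorated with @coroutine and driven with yield from become native "async def" functions that use await. A minimal before/after sketch of the pattern (the read_line helpers are illustrative only, not taken from the patch):

    import asyncio

    # Before: generator-based coroutine (pre-conversion style)
    @asyncio.coroutine
    def read_line_old(reader):
        line = yield from reader.readline()
        return line

    # After: native coroutine, as used throughout this commit
    async def read_line_new(reader):
        line = await reader.readline()
        return line
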
files:
A Misc/NEWS.d/next/Library/2017-12-08-11-02-26.bpo-32193.NJe_TQ.rst
M Lib/asyncio/base_events.py
M Lib/asyncio/base_subprocess.py
M Lib/asyncio/events.py
M Lib/asyncio/locks.py
M Lib/asyncio/queues.py
M Lib/asyncio/selector_events.py
M Lib/asyncio/streams.py
M Lib/asyncio/subprocess.py
M Lib/asyncio/tasks.py
M Lib/asyncio/test_utils.py
M Lib/asyncio/unix_events.py
M Lib/asyncio/windows_events.py
M Lib/test/test_asyncio/test_base_events.py
M Lib/test/test_asyncio/test_events.py
M Lib/test/test_asyncio/test_locks.py
M Lib/test/test_asyncio/test_queues.py
M Lib/test/test_asyncio/test_streams.py
M Lib/test/test_asyncio/test_subprocess.py
M Lib/test/test_asyncio/test_tasks.py
M Lib/test/test_asyncio/test_unix_events.py
M Lib/test/test_asyncio/test_windows_events.py

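A note on the "compatibility between old and new coroutines" tests mentioned above: the two flavours interoperate, which is what allows the conversion to proceed module by module. A hedged sketch of that round trip follows; the function names are made up for illustration and this is not the actual test code added in this commit:

    import asyncio

    @asyncio.coroutine
    def legacy():                   # old-style, generator-based coroutine
        yield from asyncio.sleep(0)
        return 'legacy'

    async def native():             # new-style native coroutine
        value = await legacy()      # awaiting an old-style coroutine works
        return value + '/native'

    @asyncio.coroutine
    def bridge():                   # old style can also drive a native coroutine
        return (yield from native())

    print(asyncio.get_event_loop().run_until_complete(bridge()))
    # -> legacy/native
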
diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py
index ffdb50f4bee..ab92a0b5807 100644
--- a/Lib/asyncio/base_events.py
+++ b/Lib/asyncio/base_events.py
@@ -33,7 +33,6 @@
 from . import events
 from . import futures
 from . import tasks
-from .coroutines import coroutine
 from .log import logger
 
 
@@ -220,13 +219,12 @@ def _wakeup(self):
             if not waiter.done():
                 waiter.set_result(waiter)
 
-    @coroutine
-    def wait_closed(self):
+    async def wait_closed(self):
         if self.sockets is None or self._waiters is None:
             return
         waiter = self._loop.create_future()
         self._waiters.append(waiter)
-        yield from waiter
+        await waiter
 
 
 class BaseEventLoop(events.AbstractEventLoop):
@@ -330,10 +328,9 @@ def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
         """Create write pipe transport."""
         raise NotImplementedError
 
-    @coroutine
-    def _make_subprocess_transport(self, protocol, args, shell,
-                                   stdin, stdout, stderr, bufsize,
-                                   extra=None, **kwargs):
+    async def _make_subprocess_transport(self, protocol, args, shell,
+                                         stdin, stdout, stderr, bufsize,
+                                         extra=None, **kwargs):
         """Create subprocess transport."""
         raise NotImplementedError
 
@@ -371,8 +368,7 @@ def _asyncgen_firstiter_hook(self, agen):
 
         self._asyncgens.add(agen)
 
-    @coroutine
-    def shutdown_asyncgens(self):
+    async def shutdown_asyncgens(self):
         """Shutdown all active asynchronous generators."""
         self._asyncgens_shutdown_called = True
 
@@ -384,12 +380,11 @@ def shutdown_asyncgens(self):
         closing_agens = list(self._asyncgens)
         self._asyncgens.clear()
 
-        shutdown_coro = tasks.gather(
+        results = await tasks.gather(
             *[ag.aclose() for ag in closing_agens],
             return_exceptions=True,
             loop=self)
 
-        results = yield from shutdown_coro
         for result, agen in zip(results, closing_agens):
             if isinstance(result, Exception):
                 self.call_exception_handler({
@@ -671,10 +666,10 @@ def getaddrinfo(self, host, port, *,
     def getnameinfo(self, sockaddr, flags=0):
         return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
 
-    @coroutine
-    def create_connection(self, protocol_factory, host=None, port=None, *,
-                          ssl=None, family=0, proto=0, flags=0, sock=None,
-                          local_addr=None, server_hostname=None):
+    async def create_connection(self, protocol_factory, host=None, port=None,
+                                *, ssl=None, family=0,
+                                proto=0, flags=0, sock=None,
+                                local_addr=None, server_hostname=None):
         """Connect to a TCP server.
 
         Create a streaming transport connection to a given Internet host and
@@ -722,7 +717,7 @@ def create_connection(self, protocol_factory, host=None, port=None, *,
             else:
                 f2 = None
 
-            yield from tasks.wait(fs, loop=self)
+            await tasks.wait(fs, loop=self)
 
             infos = f1.result()
             if not infos:
@@ -755,7 +750,7 @@ def create_connection(self, protocol_factory, host=None, port=None, *,
                             continue
                     if self._debug:
                         logger.debug("connect %r to %r", sock, address)
-                    yield from self.sock_connect(sock, address)
+                    await self.sock_connect(sock, address)
                 except OSError as exc:
                     if sock is not None:
                         sock.close()
@@ -793,7 +788,7 @@ def create_connection(self, protocol_factory, host=None, port=None, *,
                 raise ValueError(
                     'A Stream Socket was expected, got {!r}'.format(sock))
 
-        transport, protocol = yield from self._create_connection_transport(
+        transport, protocol = await self._create_connection_transport(
             sock, protocol_factory, ssl, server_hostname)
         if self._debug:
             # Get the socket from the transport because SSL transport closes
@@ -803,9 +798,8 @@ def create_connection(self, protocol_factory, host=None, port=None, *,
                          sock, host, port, transport, protocol)
         return transport, protocol
 
-    @coroutine
-    def _create_connection_transport(self, sock, protocol_factory, ssl,
-                                     server_hostname, server_side=False):
+    async def _create_connection_transport(self, sock, protocol_factory, ssl,
+                                           server_hostname, server_side=False):
 
         sock.setblocking(False)
 
@@ -820,19 +814,18 @@ def _create_connection_transport(self, sock, protocol_factory, ssl,
             transport = self._make_socket_transport(sock, protocol, waiter)
 
         try:
-            yield from waiter
+            await waiter
         except:
             transport.close()
             raise
 
         return transport, protocol
 
-    @coroutine
-    def create_datagram_endpoint(self, protocol_factory,
-                                 local_addr=None, remote_addr=None, *,
-                                 family=0, proto=0, flags=0,
-                                 reuse_address=None, reuse_port=None,
-                                 allow_broadcast=None, sock=None):
+    async def create_datagram_endpoint(self, protocol_factory,
+                                       local_addr=None, remote_addr=None, *,
+                                       family=0, proto=0, flags=0,
+                                       reuse_address=None, reuse_port=None,
+                                       allow_broadcast=None, sock=None):
         """Create datagram connection."""
         if sock is not None:
             if not _is_dgram_socket(sock):
@@ -872,7 +865,7 @@ def create_datagram_endpoint(self, protocol_factory,
                         assert isinstance(addr, tuple) and len(addr) == 2, (
                             '2-tuple is expected')
 
-                        infos = yield from _ensure_resolved(
+                        infos = await _ensure_resolved(
                             addr, family=family, type=socket.SOCK_DGRAM,
                             proto=proto, flags=flags, loop=self)
                         if not infos:
@@ -918,7 +911,7 @@ def create_datagram_endpoint(self, protocol_factory,
                     if local_addr:
                         sock.bind(local_address)
                     if remote_addr:
-                        yield from self.sock_connect(sock, remote_address)
+                        await self.sock_connect(sock, remote_address)
                         r_addr = remote_address
                 except OSError as exc:
                     if sock is not None:
@@ -948,32 +941,30 @@ def create_datagram_endpoint(self, protocol_factory,
                              remote_addr, transport, protocol)
 
         try:
-            yield from waiter
+            await waiter
         except:
             transport.close()
             raise
 
         return transport, protocol
 
-    @coroutine
-    def _create_server_getaddrinfo(self, host, port, family, flags):
-        infos = yield from _ensure_resolved((host, port), family=family,
-                                            type=socket.SOCK_STREAM,
-                                            flags=flags, loop=self)
+    async def _create_server_getaddrinfo(self, host, port, family, flags):
+        infos = await _ensure_resolved((host, port), family=family,
+                                       type=socket.SOCK_STREAM,
+                                       flags=flags, loop=self)
         if not infos:
             raise OSError('getaddrinfo({!r}) returned empty list'.format(host))
         return infos
 
-    @coroutine
-    def create_server(self, protocol_factory, host=None, port=None,
-                      *,
-                      family=socket.AF_UNSPEC,
-                      flags=socket.AI_PASSIVE,
-                      sock=None,
-                      backlog=100,
-                      ssl=None,
-                      reuse_address=None,
-                      reuse_port=None):
+    async def create_server(self, protocol_factory, host=None, port=None,
+                            *,
+                            family=socket.AF_UNSPEC,
+                            flags=socket.AI_PASSIVE,
+                            sock=None,
+                            backlog=100,
+                            ssl=None,
+                            reuse_address=None,
+                            reuse_port=None):
         """Create a TCP server.
 
         The host parameter can be a string, in that case the TCP server is bound
@@ -1011,7 +1002,7 @@ def create_server(self, protocol_factory, host=None, port=None,
             fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                   flags=flags)
                   for host in hosts]
-            infos = yield from tasks.gather(*fs, loop=self)
+            infos = await tasks.gather(*fs, loop=self)
             infos = set(itertools.chain.from_iterable(infos))
 
             completed = False
@@ -1068,8 +1059,8 @@ def create_server(self, protocol_factory, host=None, port=None,
             logger.info("%r is serving", server)
         return server
 
-    @coroutine
-    def connect_accepted_socket(self, protocol_factory, sock, *, ssl=None):
+    async def connect_accepted_socket(self, protocol_factory, sock,
+                                      *, ssl=None):
         """Handle an accepted connection.
 
         This is used by servers that accept connections outside of
@@ -1082,7 +1073,7 @@ def connect_accepted_socket(self, protocol_factory, sock, *, ssl=None):
             raise ValueError(
                 'A Stream Socket was expected, got {!r}'.format(sock))
 
-        transport, protocol = yield from self._create_connection_transport(
+        transport, protocol = await self._create_connection_transport(
             sock, protocol_factory, ssl, '', server_side=True)
         if self._debug:
             # Get the socket from the transport because SSL transport closes
@@ -1091,14 +1082,13 @@ def connect_accepted_socket(self, protocol_factory, sock, *, ssl=None):
             logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
         return transport, protocol
 
-    @coroutine
-    def connect_read_pipe(self, protocol_factory, pipe):
+    async def connect_read_pipe(self, protocol_factory, pipe):
         protocol = protocol_factory()
         waiter = self.create_future()
         transport = self._make_read_pipe_transport(pipe, protocol, waiter)
 
         try:
-            yield from waiter
+            await waiter
         except:
             transport.close()
             raise
@@ -1108,14 +1098,13 @@ def connect_read_pipe(self, protocol_factory, pipe):
                          pipe.fileno(), transport, protocol)
         return transport, protocol
 
-    @coroutine
-    def connect_write_pipe(self, protocol_factory, pipe):
+    async def connect_write_pipe(self, protocol_factory, pipe):
         protocol = protocol_factory()
         waiter = self.create_future()
         transport = self._make_write_pipe_transport(pipe, protocol, waiter)
 
         try:
-            yield from waiter
+            await waiter
         except:
             transport.close()
             raise
@@ -1138,11 +1127,13 @@ def _log_subprocess(self, msg, stdin, stdout, stderr):
                 info.append('stderr=%s' % _format_pipe(stderr))
         logger.debug(' '.join(info))
 
-    @coroutine
-    def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
-                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                         universal_newlines=False, shell=True, bufsize=0,
-                         **kwargs):
+    async def subprocess_shell(self, protocol_factory, cmd, *,
+                               stdin=subprocess.PIPE,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE,
+                               universal_newlines=False,
+                               shell=True, bufsize=0,
+                               **kwargs):
         if not isinstance(cmd, (bytes, str)):
             raise ValueError("cmd must be a string")
         if universal_newlines:
@@ -1157,17 +1148,16 @@ def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
             # (password) and may be too long
             debug_log = 'run shell command %r' % cmd
             self._log_subprocess(debug_log, stdin, stdout, stderr)
-        transport = yield from self._make_subprocess_transport(
+        transport = await self._make_subprocess_transport(
             protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
         if self._debug:
             logger.info('%s: %r', debug_log, transport)
         return transport, protocol
 
-    @coroutine
-    def subprocess_exec(self, protocol_factory, program, *args,
-                        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
-                        stderr=subprocess.PIPE, universal_newlines=False,
-                        shell=False, bufsize=0, **kwargs):
+    async def subprocess_exec(self, protocol_factory, program, *args,
+                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE, universal_newlines=False,
+                              shell=False, bufsize=0, **kwargs):
         if universal_newlines:
             raise ValueError("universal_newlines must be False")
         if shell:
@@ -1186,7 +1176,7 @@ def subprocess_exec(self, protocol_factory, program, *args,
             # (password) and may be too long
             debug_log = 'execute program %r' % program
             self._log_subprocess(debug_log, stdin, stdout, stderr)
-        transport = yield from self._make_subprocess_transport(
+        transport = await self._make_subprocess_transport(
             protocol, popen_args, False, stdin, stdout, stderr,
             bufsize, **kwargs)
         if self._debug:
diff --git a/Lib/asyncio/base_subprocess.py b/Lib/asyncio/base_subprocess.py
index cac8d962c0b..7e5a901845d 100644
--- a/Lib/asyncio/base_subprocess.py
+++ b/Lib/asyncio/base_subprocess.py
@@ -4,7 +4,6 @@
 
 from . import protocols
 from . import transports
-from .coroutines import coroutine
 from .log import logger
 
 
@@ -154,26 +153,25 @@ def kill(self):
         self._check_proc()
         self._proc.kill()
 
-    @coroutine
-    def _connect_pipes(self, waiter):
+    async def _connect_pipes(self, waiter):
         try:
             proc = self._proc
             loop = self._loop
 
             if proc.stdin is not None:
-                _, pipe = yield from loop.connect_write_pipe(
+                _, pipe = await loop.connect_write_pipe(
                     lambda: WriteSubprocessPipeProto(self, 0),
                     proc.stdin)
                 self._pipes[0] = pipe
 
             if proc.stdout is not None:
-                _, pipe = yield from loop.connect_read_pipe(
+                _, pipe = await loop.connect_read_pipe(
                     lambda: ReadSubprocessPipeProto(self, 1),
                     proc.stdout)
                 self._pipes[1] = pipe
 
             if proc.stderr is not None:
-                _, pipe = yield from loop.connect_read_pipe(
+                _, pipe = await loop.connect_read_pipe(
                     lambda: ReadSubprocessPipeProto(self, 2),
                     proc.stderr)
                 self._pipes[2] = pipe
@@ -224,8 +222,7 @@ def _process_exited(self, returncode):
                 waiter.set_result(returncode)
         self._exit_waiters = None
 
-    @coroutine
-    def _wait(self):
+    async def _wait(self):
         """Wait until the process exit and return the process return code.
 
         This method is a coroutine."""
@@ -234,7 +231,7 @@ def _wait(self):
 
         waiter = self._loop.create_future()
         self._exit_waiters.append(waiter)
-        return (yield from waiter)
+        return await waiter
 
     def _try_finish(self):
         assert not self._finished
diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py
index e59d3d2760e..2cd6035973d 100644
--- a/Lib/asyncio/events.py
+++ b/Lib/asyncio/events.py
@@ -219,7 +219,7 @@ def close(self):
         """Stop serving.  This leaves existing connections open."""
         return NotImplemented
 
-    def wait_closed(self):
+    async def wait_closed(self):
         """Coroutine to wait until service is closed."""
         return NotImplemented
 
@@ -267,7 +267,7 @@ def close(self):
         """
         raise NotImplementedError
 
-    def shutdown_asyncgens(self):
+    async def shutdown_asyncgens(self):
         """Shutdown all active asynchronous generators."""
         raise NotImplementedError
 
@@ -302,7 +302,7 @@ def create_task(self, coro):
     def call_soon_threadsafe(self, callback, *args):
         raise NotImplementedError
 
-    def run_in_executor(self, executor, func, *args):
+    async def run_in_executor(self, executor, func, *args):
         raise NotImplementedError
 
     def set_default_executor(self, executor):
@@ -310,21 +310,23 @@ def set_default_executor(self, executor):
 
     # Network I/O methods returning Futures.
 
-    def getaddrinfo(self, host, port, *, family=0, type=0, proto=0, flags=0):
+    async def getaddrinfo(self, host, port, *,
+                          family=0, type=0, proto=0, flags=0):
         raise NotImplementedError
 
-    def getnameinfo(self, sockaddr, flags=0):
+    async def getnameinfo(self, sockaddr, flags=0):
         raise NotImplementedError
 
-    def create_connection(self, protocol_factory, host=None, port=None, *,
-                          ssl=None, family=0, proto=0, flags=0, sock=None,
-                          local_addr=None, server_hostname=None):
+    async def create_connection(self, protocol_factory, host=None, port=None,
+                                *, ssl=None, family=0, proto=0,
+                                flags=0, sock=None, local_addr=None,
+                                server_hostname=None):
         raise NotImplementedError
 
-    def create_server(self, protocol_factory, host=None, port=None, *,
-                      family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
-                      sock=None, backlog=100, ssl=None, reuse_address=None,
-                      reuse_port=None):
+    async def create_server(self, protocol_factory, host=None, port=None,
+                            *, family=socket.AF_UNSPEC,
+                            flags=socket.AI_PASSIVE, sock=None, backlog=100,
+                            ssl=None, reuse_address=None, reuse_port=None):
         """A coroutine which creates a TCP server bound to host and port.
 
         The return value is a Server object which can be used to stop
@@ -362,13 +364,13 @@ def create_server(self, protocol_factory, host=None, port=None, *,
         """
         raise NotImplementedError
 
-    def create_unix_connection(self, protocol_factory, path=None, *,
-                               ssl=None, sock=None,
-                               server_hostname=None):
+    async def create_unix_connection(self, protocol_factory, path=None, *,
+                                     ssl=None, sock=None,
+                                     server_hostname=None):
         raise NotImplementedError
 
-    def create_unix_server(self, protocol_factory, path=None, *,
-                           sock=None, backlog=100, ssl=None):
+    async def create_unix_server(self, protocol_factory, path=None, *,
+                                 sock=None, backlog=100, ssl=None):
         """A coroutine which creates a UNIX Domain Socket server.
 
         The return value is a Server object, which can be used to stop
@@ -388,11 +390,11 @@ def create_unix_server(self, protocol_factory, path=None, *,
         """
         raise NotImplementedError
 
-    def create_datagram_endpoint(self, protocol_factory,
-                                 local_addr=None, remote_addr=None, *,
-                                 family=0, proto=0, flags=0,
-                                 reuse_address=None, reuse_port=None,
-                                 allow_broadcast=None, sock=None):
+    async def create_datagram_endpoint(self, protocol_factory,
+                                       local_addr=None, remote_addr=None, *,
+                                       family=0, proto=0, flags=0,
+                                       reuse_address=None, reuse_port=None,
+                                       allow_broadcast=None, sock=None):
         """A coroutine which creates a datagram endpoint.
 
         This method will try to establish the endpoint in the background.
@@ -425,7 +427,7 @@ def create_datagram_endpoint(self, protocol_factory,
 
     # Pipes and subprocesses.
 
-    def connect_read_pipe(self, protocol_factory, pipe):
+    async def connect_read_pipe(self, protocol_factory, pipe):
         """Register read pipe in event loop. Set the pipe to non-blocking mode.
 
         protocol_factory should instantiate object with Protocol interface.
@@ -438,7 +440,7 @@ def connect_read_pipe(self, protocol_factory, pipe):
         # close fd in pipe transport then close f and vise versa.
         raise NotImplementedError
 
-    def connect_write_pipe(self, protocol_factory, pipe):
+    async def connect_write_pipe(self, protocol_factory, pipe):
         """Register write pipe in event loop.
 
         protocol_factory should instantiate object with BaseProtocol interface.
@@ -451,14 +453,18 @@ def connect_write_pipe(self, protocol_factory, pipe):
         # close fd in pipe transport then close f and vise versa.
         raise NotImplementedError
 
-    def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
-                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                         **kwargs):
+    async def subprocess_shell(self, protocol_factory, cmd, *,
+                               stdin=subprocess.PIPE,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE,
+                               **kwargs):
         raise NotImplementedError
 
-    def subprocess_exec(self, protocol_factory, *args, stdin=subprocess.PIPE,
-                        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                        **kwargs):
+    async def subprocess_exec(self, protocol_factory, *args,
+                              stdin=subprocess.PIPE,
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE,
+                              **kwargs):
         raise NotImplementedError
 
     # Ready-based callback registration methods.
@@ -480,19 +486,19 @@ def remove_writer(self, fd):
 
     # Completion based I/O methods returning Futures.
 
-    def sock_recv(self, sock, nbytes):
+    async def sock_recv(self, sock, nbytes):
         raise NotImplementedError
 
-    def sock_recv_into(self, sock, buf):
+    async def sock_recv_into(self, sock, buf):
         raise NotImplementedError
 
-    def sock_sendall(self, sock, data):
+    async def sock_sendall(self, sock, data):
         raise NotImplementedError
 
-    def sock_connect(self, sock, address):
+    async def sock_connect(self, sock, address):
         raise NotImplementedError
 
-    def sock_accept(self, sock):
+    async def sock_accept(self, sock):
         raise NotImplementedError
 
     # Signal handling.
diff --git a/Lib/asyncio/locks.py b/Lib/asyncio/locks.py
index 750c4359179..aa6ed3eaea6 100644
--- a/Lib/asyncio/locks.py
+++ b/Lib/asyncio/locks.py
@@ -66,20 +66,21 @@ def __iter__(self):
         yield from self.acquire()
         return _ContextManager(self)
 
+    async def __acquire_ctx(self):
+        await self.acquire()
+        return _ContextManager(self)
+
     def __await__(self):
         # To make "with await lock" work.
-        yield from self.acquire()
-        return _ContextManager(self)
+        return self.__acquire_ctx().__await__()
 
-    @coroutine
-    def __aenter__(self):
-        yield from self.acquire()
+    async def __aenter__(self):
+        await self.acquire()
         # We have no use for the "as ..."  clause in the with
         # statement for locks.
         return None
 
-    @coroutine
-    def __aexit__(self, exc_type, exc, tb):
+    async def __aexit__(self, exc_type, exc, tb):
         self.release()
 
 
@@ -156,8 +157,7 @@ def locked(self):
         """Return True if lock is acquired."""
         return self._locked
 
-    @coroutine
-    def acquire(self):
+    async def acquire(self):
         """Acquire a lock.
 
         This method blocks until the lock is unlocked, then sets it to
@@ -170,7 +170,7 @@ def acquire(self):
         fut = self._loop.create_future()
         self._waiters.append(fut)
         try:
-            yield from fut
+            await fut
             self._locked = True
             return True
         except futures.CancelledError:
@@ -251,8 +251,7 @@ def clear(self):
         to true again."""
         self._value = False
 
-    @coroutine
-    def wait(self):
+    async def wait(self):
         """Block until the internal flag is true.
 
         If the internal flag is true on entry, return True
@@ -265,7 +264,7 @@ def wait(self):
         fut = self._loop.create_future()
         self._waiters.append(fut)
         try:
-            yield from fut
+            await fut
             return True
         finally:
             self._waiters.remove(fut)
@@ -307,8 +306,7 @@ def __repr__(self):
             extra = '{},waiters:{}'.format(extra, len(self._waiters))
         return '<{} [{}]>'.format(res[1:-1], extra)
 
-    @coroutine
-    def wait(self):
+    async def wait(self):
         """Wait until notified.
 
         If the calling coroutine has not acquired the lock when this
@@ -327,7 +325,7 @@ def wait(self):
             fut = self._loop.create_future()
             self._waiters.append(fut)
             try:
-                yield from fut
+                await fut
                 return True
             finally:
                 self._waiters.remove(fut)
@@ -336,13 +334,12 @@ def wait(self):
             # Must reacquire lock even if wait is cancelled
             while True:
                 try:
-                    yield from self.acquire()
+                    await self.acquire()
                     break
                 except futures.CancelledError:
                     pass
 
-    @coroutine
-    def wait_for(self, predicate):
+    async def wait_for(self, predicate):
         """Wait until a predicate becomes true.
 
         The predicate should be a callable which result will be
@@ -351,7 +348,7 @@ def wait_for(self, predicate):
         """
         result = predicate()
         while not result:
-            yield from self.wait()
+            await self.wait()
             result = predicate()
         return result
 
@@ -432,8 +429,7 @@ def locked(self):
         """Returns True if semaphore can not be acquired immediately."""
         return self._value == 0
 
-    @coroutine
-    def acquire(self):
+    async def acquire(self):
         """Acquire a semaphore.
 
         If the internal counter is larger than zero on entry,
@@ -446,7 +442,7 @@ def acquire(self):
             fut = self._loop.create_future()
             self._waiters.append(fut)
             try:
-                yield from fut
+                await fut
             except:
                 # See the similar code in Queue.get.
                 fut.cancel()
diff --git a/Lib/asyncio/queues.py b/Lib/asyncio/queues.py
index 4fc681dde97..10e694f1393 100644
--- a/Lib/asyncio/queues.py
+++ b/Lib/asyncio/queues.py
@@ -7,7 +7,6 @@
 
 from . import events
 from . import locks
-from .coroutines import coroutine
 
 
 class QueueEmpty(Exception):
@@ -28,7 +27,7 @@ class Queue:
     """A queue, useful for coordinating producer and consumer coroutines.
 
     If maxsize is less than or equal to zero, the queue size is infinite. If it
-    is an integer greater than 0, then "yield from put()" will block when the
+    is an integer greater than 0, then "await put()" will block when the
     queue reaches maxsize, until an item is removed by get().
 
     Unlike the standard library Queue, you can reliably know this Queue's size
@@ -116,20 +115,17 @@ def full(self):
         else:
             return self.qsize() >= self._maxsize
 
-    @coroutine
-    def put(self, item):
+    async def put(self, item):
         """Put an item into the queue.
 
         Put an item into the queue. If the queue is full, wait until a free
         slot is available before adding item.
-
-        This method is a coroutine.
         """
         while self.full():
             putter = self._loop.create_future()
             self._putters.append(putter)
             try:
-                yield from putter
+                await putter
             except:
                 putter.cancel()  # Just in case putter is not done yet.
                 if not self.full() and not putter.cancelled():
@@ -151,19 +147,16 @@ def put_nowait(self, item):
         self._finished.clear()
         self._wakeup_next(self._getters)
 
-    @coroutine
-    def get(self):
+    async def get(self):
         """Remove and return an item from the queue.
 
         If queue is empty, wait until an item is available.
-
-        This method is a coroutine.
         """
         while self.empty():
             getter = self._loop.create_future()
             self._getters.append(getter)
             try:
-                yield from getter
+                await getter
             except:
                 getter.cancel()  # Just in case getter is not done yet.
 
@@ -210,8 +203,7 @@ def task_done(self):
         if self._unfinished_tasks == 0:
             self._finished.set()
 
-    @coroutine
-    def join(self):
+    async def join(self):
         """Block until all items in the queue have been gotten and processed.
 
         The count of unfinished tasks goes up whenever an item is added to the
@@ -220,7 +212,7 @@ def join(self):
         When the count of unfinished tasks drops to zero, join() unblocks.
         """
         if self._unfinished_tasks > 0:
-            yield from self._finished.wait()
+            await self._finished.wait()
 
 
 class PriorityQueue(Queue):
diff --git a/Lib/asyncio/selector_events.py b/Lib/asyncio/selector_events.py
index 3639466f6c2..c30fde7f4da 100644
--- a/Lib/asyncio/selector_events.py
+++ b/Lib/asyncio/selector_events.py
@@ -24,7 +24,6 @@
 from . import futures
 from . import transports
 from . import sslproto
-from .coroutines import coroutine
 from .log import logger
 
 
@@ -189,9 +188,8 @@ def _accept_connection(self, protocol_factory, sock,
                                                   sslcontext, server)
                 self.create_task(accept)
 
-    @coroutine
-    def _accept_connection2(self, protocol_factory, conn, extra,
-                            sslcontext=None, server=None):
+    async def _accept_connection2(self, protocol_factory, conn, extra,
+                                  sslcontext=None, server=None):
         protocol = None
         transport = None
         try:
@@ -207,7 +205,7 @@ def _accept_connection2(self, protocol_factory, conn, extra,
                     server=server)
 
             try:
-                yield from waiter
+                await waiter
             except:
                 transport.close()
                 raise
@@ -452,8 +450,7 @@ def _sock_sendall(self, fut, registered_fd, sock, data):
             fd = sock.fileno()
             self.add_writer(fd, self._sock_sendall, fut, fd, sock, data)
 
-    @coroutine
-    def sock_connect(self, sock, address):
+    async def sock_connect(self, sock, address):
         """Connect to a remote socket at address.
 
         This method is a coroutine.
@@ -465,12 +462,12 @@ def sock_connect(self, sock, address):
             resolved = base_events._ensure_resolved(
                 address, family=sock.family, proto=sock.proto, loop=self)
             if not resolved.done():
-                yield from resolved
+                await resolved
             _, _, _, _, address = resolved.result()[0]
 
         fut = self.create_future()
         self._sock_connect(fut, sock, address)
-        return (yield from fut)
+        return await fut
 
     def _sock_connect(self, fut, sock, address):
         fd = sock.fileno()
diff --git a/Lib/asyncio/streams.py b/Lib/asyncio/streams.py
index 15c9513527f..baa9ec94439 100644
--- a/Lib/asyncio/streams.py
+++ b/Lib/asyncio/streams.py
@@ -14,8 +14,8 @@
 from . import coroutines
 from . import events
 from . import protocols
-from .coroutines import coroutine
 from .log import logger
+from .tasks import sleep
 
 
 _DEFAULT_LIMIT = 2 ** 16
@@ -52,9 +52,8 @@ def __reduce__(self):
         return type(self), (self.args[0], self.consumed)
 
 
-@coroutine
-def open_connection(host=None, port=None, *,
-                    loop=None, limit=_DEFAULT_LIMIT, **kwds):
+async def open_connection(host=None, port=None, *,
+                          loop=None, limit=_DEFAULT_LIMIT, **kwds):
     """A wrapper for create_connection() returning a (reader, writer) pair.
 
     The reader returned is a StreamReader instance; the writer is a
@@ -76,15 +75,14 @@ def open_connection(host=None, port=None, *,
         loop = events.get_event_loop()
     reader = StreamReader(limit=limit, loop=loop)
     protocol = StreamReaderProtocol(reader, loop=loop)
-    transport, _ = yield from loop.create_connection(
+    transport, _ = await loop.create_connection(
         lambda: protocol, host, port, **kwds)
     writer = StreamWriter(transport, protocol, reader, loop)
     return reader, writer
 
 
-@coroutine
-def start_server(client_connected_cb, host=None, port=None, *,
-                 loop=None, limit=_DEFAULT_LIMIT, **kwds):
+async def start_server(client_connected_cb, host=None, port=None, *,
+                       loop=None, limit=_DEFAULT_LIMIT, **kwds):
     """Start a socket server, call back for each client connected.
 
     The first parameter, `client_connected_cb`, takes two parameters:
@@ -115,28 +113,26 @@ def factory():
                                         loop=loop)
         return protocol
 
-    return (yield from loop.create_server(factory, host, port, **kwds))
+    return await loop.create_server(factory, host, port, **kwds)
 
 
 if hasattr(socket, 'AF_UNIX'):
     # UNIX Domain Sockets are supported on this platform
 
-    @coroutine
-    def open_unix_connection(path=None, *,
-                             loop=None, limit=_DEFAULT_LIMIT, **kwds):
+    async def open_unix_connection(path=None, *,
+                                   loop=None, limit=_DEFAULT_LIMIT, **kwds):
         """Similar to `open_connection` but works with UNIX Domain Sockets."""
         if loop is None:
             loop = events.get_event_loop()
         reader = StreamReader(limit=limit, loop=loop)
         protocol = StreamReaderProtocol(reader, loop=loop)
-        transport, _ = yield from loop.create_unix_connection(
+        transport, _ = await loop.create_unix_connection(
             lambda: protocol, path, **kwds)
         writer = StreamWriter(transport, protocol, reader, loop)
         return reader, writer
 
-    @coroutine
-    def start_unix_server(client_connected_cb, path=None, *,
-                          loop=None, limit=_DEFAULT_LIMIT, **kwds):
+    async def start_unix_server(client_connected_cb, path=None, *,
+                                loop=None, limit=_DEFAULT_LIMIT, **kwds):
         """Similar to `start_server` but works with UNIX Domain Sockets."""
         if loop is None:
             loop = events.get_event_loop()
@@ -147,7 +143,7 @@ def factory():
                                             loop=loop)
             return protocol
 
-        return (yield from loop.create_unix_server(factory, path, **kwds))
+        return await loop.create_unix_server(factory, path, **kwds)
 
 
 class FlowControlMixin(protocols.Protocol):
@@ -203,8 +199,7 @@ def connection_lost(self, exc):
         else:
             waiter.set_exception(exc)
 
-    @coroutine
-    def _drain_helper(self):
+    async def _drain_helper(self):
         if self._connection_lost:
             raise ConnectionResetError('Connection lost')
         if not self._paused:
@@ -213,7 +208,7 @@ def _drain_helper(self):
         assert waiter is None or waiter.cancelled()
         waiter = self._loop.create_future()
         self._drain_waiter = waiter
-        yield from waiter
+        await waiter
 
 
 class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
@@ -313,14 +308,13 @@ def close(self):
     def get_extra_info(self, name, default=None):
         return self._transport.get_extra_info(name, default)
 
-    @coroutine
-    def drain(self):
+    async def drain(self):
         """Flush the write buffer.
 
         The intended use is to write
 
           w.write(data)
-          yield from w.drain()
+          await w.drain()
         """
         if self._reader is not None:
             exc = self._reader.exception()
@@ -331,11 +325,11 @@ def drain(self):
                 # Yield to the event loop so connection_lost() may be
                 # called.  Without this, _drain_helper() would return
                 # immediately, and code that calls
-                #     write(...); yield from drain()
+                #     write(...); await drain()
                 # in a loop would never call connection_lost(), so it
                 # would not see an error when the socket is closed.
-                yield
-        yield from self._protocol._drain_helper()
+                await sleep(0, loop=self._loop)
+        await self._protocol._drain_helper()
 
 
 class StreamReader:
@@ -436,8 +430,7 @@ def feed_data(self, data):
             else:
                 self._paused = True
 
-    @coroutine
-    def _wait_for_data(self, func_name):
+    async def _wait_for_data(self, func_name):
         """Wait until feed_data() or feed_eof() is called.
 
         If stream was paused, automatically resume it.
@@ -460,12 +453,11 @@ def _wait_for_data(self, func_name):
 
         self._waiter = self._loop.create_future()
         try:
-            yield from self._waiter
+            await self._waiter
         finally:
             self._waiter = None
 
-    @coroutine
-    def readline(self):
+    async def readline(self):
         """Read chunk of data from the stream until newline (b'\n') is found.
 
         On success, return chunk that ends with newline. If only partial
@@ -484,7 +476,7 @@ def readline(self):
         sep = b'\n'
         seplen = len(sep)
         try:
-            line = yield from self.readuntil(sep)
+            line = await self.readuntil(sep)
         except IncompleteReadError as e:
             return e.partial
         except LimitOverrunError as e:
@@ -496,8 +488,7 @@ def readline(self):
             raise ValueError(e.args[0])
         return line
 
-    @coroutine
-    def readuntil(self, separator=b'\n'):
+    async def readuntil(self, separator=b'\n'):
         """Read data from the stream until ``separator`` is found.
 
         On success, the data and separator will be removed from the
@@ -577,7 +568,7 @@ def readuntil(self, separator=b'\n'):
                 raise IncompleteReadError(chunk, None)
 
             # _wait_for_data() will resume reading if stream was paused.
-            yield from self._wait_for_data('readuntil')
+            await self._wait_for_data('readuntil')
 
         if isep > self._limit:
             raise LimitOverrunError(
@@ -588,8 +579,7 @@ def readuntil(self, separator=b'\n'):
         self._maybe_resume_transport()
         return bytes(chunk)
 
-    @coroutine
-    def read(self, n=-1):
+    async def read(self, n=-1):
         """Read up to `n` bytes from the stream.
 
         If n is not provided, or set to -1, read until EOF and return all read
@@ -623,14 +613,14 @@ def read(self, n=-1):
             # bytes.  So just call self.read(self._limit) until EOF.
             blocks = []
             while True:
-                block = yield from self.read(self._limit)
+                block = await self.read(self._limit)
                 if not block:
                     break
                 blocks.append(block)
             return b''.join(blocks)
 
         if not self._buffer and not self._eof:
-            yield from self._wait_for_data('read')
+            await self._wait_for_data('read')
 
         # This will work right even if buffer is less than n bytes
         data = bytes(self._buffer[:n])
@@ -639,8 +629,7 @@ def read(self, n=-1):
         self._maybe_resume_transport()
         return data
 
-    @coroutine
-    def readexactly(self, n):
+    async def readexactly(self, n):
         """Read exactly `n` bytes.
 
         Raise an IncompleteReadError if EOF is reached before `n` bytes can be
@@ -670,7 +659,7 @@ def readexactly(self, n):
                 self._buffer.clear()
                 raise IncompleteReadError(incomplete, n)
 
-            yield from self._wait_for_data('readexactly')
+            await self._wait_for_data('readexactly')
 
         if len(self._buffer) == n:
             data = bytes(self._buffer)
@@ -684,9 +673,8 @@ def readexactly(self, n):
     def __aiter__(self):
         return self
 
-    @coroutine
-    def __anext__(self):
-        val = yield from self.readline()
+    async def __anext__(self):
+        val = await self.readline()
         if val == b'':
             raise StopAsyncIteration
         return val
diff --git a/Lib/asyncio/subprocess.py b/Lib/asyncio/subprocess.py
index 4c85466859f..dd3d10c8879 100644
--- a/Lib/asyncio/subprocess.py
+++ b/Lib/asyncio/subprocess.py
@@ -6,7 +6,6 @@
 from . import protocols
 from . import streams
 from . import tasks
-from .coroutines import coroutine
 from .log import logger
 
 
@@ -121,12 +120,9 @@ def __repr__(self):
     def returncode(self):
         return self._transport.get_returncode()
 
-    @coroutine
-    def wait(self):
-        """Wait until the process exit and return the process return code.
-
-        This method is a coroutine."""
-        return (yield from self._transport._wait())
+    async def wait(self):
+        """Wait until the process exit and return the process return code."""
+        return await self._transport._wait()
 
     def send_signal(self, signal):
         self._transport.send_signal(signal)
@@ -137,15 +133,14 @@ def terminate(self):
     def kill(self):
         self._transport.kill()
 
-    @coroutine
-    def _feed_stdin(self, input):
+    async def _feed_stdin(self, input):
         debug = self._loop.get_debug()
         self.stdin.write(input)
         if debug:
             logger.debug('%r communicate: feed stdin (%s bytes)',
                         self, len(input))
         try:
-            yield from self.stdin.drain()
+            await self.stdin.drain()
         except (BrokenPipeError, ConnectionResetError) as exc:
             # communicate() ignores BrokenPipeError and ConnectionResetError
             if debug:
@@ -155,12 +150,10 @@ def _feed_stdin(self, input):
             logger.debug('%r communicate: close stdin', self)
         self.stdin.close()
 
-    @coroutine
-    def _noop(self):
+    async def _noop(self):
         return None
 
-    @coroutine
-    def _read_stream(self, fd):
+    async def _read_stream(self, fd):
         transport = self._transport.get_pipe_transport(fd)
         if fd == 2:
             stream = self.stderr
@@ -170,15 +163,14 @@ def _read_stream(self, fd):
         if self._loop.get_debug():
             name = 'stdout' if fd == 1 else 'stderr'
             logger.debug('%r communicate: read %s', self, name)
-        output = yield from stream.read()
+        output = await stream.read()
         if self._loop.get_debug():
             name = 'stdout' if fd == 1 else 'stderr'
             logger.debug('%r communicate: close %s', self, name)
         transport.close()
         return output
 
-    @coroutine
-    def communicate(self, input=None):
+    async def communicate(self, input=None):
         if input is not None:
             stdin = self._feed_stdin(input)
         else:
@@ -191,36 +183,36 @@ def communicate(self, input=None):
             stderr = self._read_stream(2)
         else:
             stderr = self._noop()
-        stdin, stdout, stderr = yield from tasks.gather(stdin, stdout, stderr,
-                                                        loop=self._loop)
-        yield from self.wait()
+        stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr,
+                                                   loop=self._loop)
+        await self.wait()
         return (stdout, stderr)
 
 
-@coroutine
-def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
-                            loop=None, limit=streams._DEFAULT_LIMIT, **kwds):
+async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
+                                  loop=None, limit=streams._DEFAULT_LIMIT,
+                                  **kwds):
     if loop is None:
         loop = events.get_event_loop()
     protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                         loop=loop)
-    transport, protocol = yield from loop.subprocess_shell(
-                                            protocol_factory,
-                                            cmd, stdin=stdin, stdout=stdout,
-                                            stderr=stderr, **kwds)
+    transport, protocol = await loop.subprocess_shell(
+        protocol_factory,
+        cmd, stdin=stdin, stdout=stdout,
+        stderr=stderr, **kwds)
     return Process(transport, protocol, loop)
 
-@coroutine
-def create_subprocess_exec(program, *args, stdin=None, stdout=None,
-                           stderr=None, loop=None,
-                           limit=streams._DEFAULT_LIMIT, **kwds):
+
+async def create_subprocess_exec(program, *args, stdin=None, stdout=None,
+                                 stderr=None, loop=None,
+                                 limit=streams._DEFAULT_LIMIT, **kwds):
     if loop is None:
         loop = events.get_event_loop()
     protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                         loop=loop)
-    transport, protocol = yield from loop.subprocess_exec(
-                                            protocol_factory,
-                                            program, *args,
-                                            stdin=stdin, stdout=stdout,
-                                            stderr=stderr, **kwds)
+    transport, protocol = await loop.subprocess_exec(
+        protocol_factory,
+        program, *args,
+        stdin=stdin, stdout=stdout,
+        stderr=stderr, **kwds)
     return Process(transport, protocol, loop)
diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py
index 5d744c3d30e..c23d06afd7c 100644
--- a/Lib/asyncio/tasks.py
+++ b/Lib/asyncio/tasks.py
@@ -9,6 +9,7 @@
 import concurrent.futures
 import functools
 import inspect
+import types
 import warnings
 import weakref
 
@@ -276,8 +277,7 @@ def _wakeup(self, future):
 ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
 
 
-@coroutine
-def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
+async def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
     """Wait for the Futures and coroutines given by fs to complete.
 
     The sequence futures must not be empty.
@@ -288,7 +288,7 @@ def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
 
     Usage:
 
-        done, pending = yield from asyncio.wait(fs)
+        done, pending = await asyncio.wait(fs)
 
     Note: This does not raise TimeoutError! Futures that aren't done
     when the timeout occurs are returned in the second set.
@@ -305,7 +305,7 @@ def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
 
     fs = {ensure_future(f, loop=loop) for f in set(fs)}
 
-    return (yield from _wait(fs, timeout, return_when, loop))
+    return await _wait(fs, timeout, return_when, loop)
 
 
 def _release_waiter(waiter, *args):
@@ -313,8 +313,7 @@ def _release_waiter(waiter, *args):
         waiter.set_result(None)
 
 
-@coroutine
-def wait_for(fut, timeout, *, loop=None):
+async def wait_for(fut, timeout, *, loop=None):
     """Wait for the single Future or coroutine to complete, with timeout.
 
     Coroutine will be wrapped in Task.
@@ -331,7 +330,7 @@ def wait_for(fut, timeout, *, loop=None):
         loop = events.get_event_loop()
 
     if timeout is None:
-        return (yield from fut)
+        return await fut
 
     if timeout <= 0:
         fut = ensure_future(fut, loop=loop)
@@ -352,7 +351,7 @@ def wait_for(fut, timeout, *, loop=None):
     try:
         # wait until the future completes or the timeout
         try:
-            yield from waiter
+            await waiter
         except futures.CancelledError:
             fut.remove_done_callback(cb)
             fut.cancel()
@@ -368,8 +367,7 @@ def wait_for(fut, timeout, *, loop=None):
         timeout_handle.cancel()
 
 
-@coroutine
-def _wait(fs, timeout, return_when, loop):
+async def _wait(fs, timeout, return_when, loop):
     """Internal helper for wait() and wait_for().
 
     The fs argument must be a collection of Futures.
@@ -397,7 +395,7 @@ def _on_completion(f):
         f.add_done_callback(_on_completion)
 
     try:
-        yield from waiter
+        await waiter
     finally:
         if timeout_handle is not None:
             timeout_handle.cancel()
@@ -423,10 +421,10 @@ def as_completed(fs, *, loop=None, timeout=None):
     This differs from PEP 3148; the proper way to use this is:
 
         for f in as_completed(fs):
-            result = yield from f  # The 'yield from' may raise.
+            result = await f  # The 'await' may raise.
             # Use result.
 
-    If a timeout is specified, the 'yield from' will raise
+    If a timeout is specified, the 'await' will raise
     TimeoutError when the timeout occurs before all Futures are done.
 
     Note: The futures 'f' are not necessarily members of fs.
@@ -453,9 +451,8 @@ def _on_completion(f):
         if not todo and timeout_handle is not None:
             timeout_handle.cancel()
 
-    @coroutine
-    def _wait_for_one():
-        f = yield from done.get()
+    async def _wait_for_one():
+        f = await done.get()
         if f is None:
             # Dummy value from _on_timeout().
             raise futures.TimeoutError
@@ -469,11 +466,22 @@ def _wait_for_one():
         yield _wait_for_one()
 
 
-@coroutine
-def sleep(delay, result=None, *, loop=None):
+@types.coroutine
+def __sleep0():
+    """Skip one event loop run cycle.
+
+    This is a private helper for 'asyncio.sleep()', used
+    when the 'delay' is set to 0.  It uses a bare 'yield'
+    expression (which Task._step knows how to handle)
+    instead of creating a Future object.
+    """
+    yield
+
+
+async def sleep(delay, result=None, *, loop=None):
     """Coroutine that completes after a given time (in seconds)."""
     if delay == 0:
-        yield
+        await __sleep0()
         return result
 
     if loop is None:
@@ -483,7 +491,7 @@ def sleep(delay, result=None, *, loop=None):
                                 futures._set_result_unless_cancelled,
                                 future, result)
     try:
-        return (yield from future)
+        return await future
     finally:
         h.cancel()
 
@@ -652,11 +660,11 @@ def shield(arg, *, loop=None):
 
     The statement
 
-        res = yield from shield(something())
+        res = await shield(something())
 
     is exactly equivalent to the statement
 
-        res = yield from something()
+        res = await something()
 
     *except* that if the coroutine containing it is cancelled, the
     task running in something() is not cancelled.  From the POV of
@@ -669,7 +677,7 @@ def shield(arg, *, loop=None):
     you can combine shield() with a try/except clause, as follows:
 
         try:
-            res = yield from shield(something())
+            res = await shield(something())
         except CancelledError:
             res = None
     """
diff --git a/Lib/asyncio/test_utils.py b/Lib/asyncio/test_utils.py
index 32d3b0bf630..231916970c7 100644
--- a/Lib/asyncio/test_utils.py
+++ b/Lib/asyncio/test_utils.py
@@ -30,7 +30,6 @@
 from . import events
 from . import futures
 from . import tasks
-from .coroutines import coroutine
 from .log import logger
 from test import support
 
@@ -43,8 +42,7 @@ def dummy_ssl_context():
 
 
 def run_briefly(loop):
-    @coroutine
-    def once():
+    async def once():
         pass
     gen = once()
     t = loop.create_task(gen)
diff --git a/Lib/asyncio/unix_events.py b/Lib/asyncio/unix_events.py
index ab818da1dfa..0308b02a52d 100644
--- a/Lib/asyncio/unix_events.py
+++ b/Lib/asyncio/unix_events.py
@@ -20,7 +20,6 @@
 from . import futures
 from . import selector_events
 from . import transports
-from .coroutines import coroutine
 from .log import logger
 
 
@@ -168,10 +167,9 @@ def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                                    extra=None):
         return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)
 
-    @coroutine
-    def _make_subprocess_transport(self, protocol, args, shell,
-                                   stdin, stdout, stderr, bufsize,
-                                   extra=None, **kwargs):
+    async def _make_subprocess_transport(self, protocol, args, shell,
+                                         stdin, stdout, stderr, bufsize,
+                                         extra=None, **kwargs):
         with events.get_child_watcher() as watcher:
             waiter = self.create_future()
             transp = _UnixSubprocessTransport(self, protocol, args, shell,
@@ -182,29 +180,20 @@ def _make_subprocess_transport(self, protocol, args, shell,
             watcher.add_child_handler(transp.get_pid(),
                                       self._child_watcher_callback, transp)
             try:
-                yield from waiter
-            except Exception as exc:
-                # Workaround CPython bug #23353: using yield/yield-from in an
-                # except block of a generator doesn't clear properly
-                # sys.exc_info()
-                err = exc
-            else:
-                err = None
-
-            if err is not None:
+                await waiter
+            except Exception:
                 transp.close()
-                yield from transp._wait()
-                raise err
+                await transp._wait()
+                raise
 
         return transp
 
     def _child_watcher_callback(self, pid, returncode, transp):
         self.call_soon_threadsafe(transp._process_exited, returncode)
 
-    @coroutine
-    def create_unix_connection(self, protocol_factory, path=None, *,
-                               ssl=None, sock=None,
-                               server_hostname=None):
+    async def create_unix_connection(self, protocol_factory, path=None, *,
+                                     ssl=None, sock=None,
+                                     server_hostname=None):
         assert server_hostname is None or isinstance(server_hostname, str)
         if ssl:
             if server_hostname is None:
@@ -223,7 +212,7 @@ def create_unix_connection(self, protocol_factory, path=None, *,
             sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
             try:
                 sock.setblocking(False)
-                yield from self.sock_connect(sock, path)
+                await self.sock_connect(sock, path)
             except:
                 sock.close()
                 raise
@@ -238,13 +227,12 @@ def create_unix_connection(self, protocol_factory, path=None, *,
                     .format(sock))
             sock.setblocking(False)
 
-        transport, protocol = yield from self._create_connection_transport(
+        transport, protocol = await self._create_connection_transport(
             sock, protocol_factory, ssl, server_hostname)
         return transport, protocol
 
-    @coroutine
-    def create_unix_server(self, protocol_factory, path=None, *,
-                           sock=None, backlog=100, ssl=None):
+    async def create_unix_server(self, protocol_factory, path=None, *,
+                                 sock=None, backlog=100, ssl=None):
         if isinstance(ssl, bool):
             raise TypeError('ssl argument must be an SSLContext or None')
 
diff --git a/Lib/asyncio/windows_events.py b/Lib/asyncio/windows_events.py
index de41e645163..95b12a11a62 100644
--- a/Lib/asyncio/windows_events.py
+++ b/Lib/asyncio/windows_events.py
@@ -15,7 +15,6 @@
 from . import selector_events
 from . import tasks
 from . import windows_utils
-from .coroutines import coroutine
 from .log import logger
 
 
@@ -305,17 +304,15 @@ def __init__(self, proactor=None):
             proactor = IocpProactor()
         super().__init__(proactor)
 
-    @coroutine
-    def create_pipe_connection(self, protocol_factory, address):
+    async def create_pipe_connection(self, protocol_factory, address):
         f = self._proactor.connect_pipe(address)
-        pipe = yield from f
+        pipe = await f
         protocol = protocol_factory()
         trans = self._make_duplex_pipe_transport(pipe, protocol,
                                                  extra={'addr': address})
         return trans, protocol
 
-    @coroutine
-    def start_serving_pipe(self, protocol_factory, address):
+    async def start_serving_pipe(self, protocol_factory, address):
         server = PipeServer(address)
 
         def loop_accept_pipe(f=None):
@@ -361,28 +358,20 @@ def loop_accept_pipe(f=None):
         self.call_soon(loop_accept_pipe)
         return [server]
 
-    @coroutine
-    def _make_subprocess_transport(self, protocol, args, shell,
-                                   stdin, stdout, stderr, bufsize,
-                                   extra=None, **kwargs):
+    async def _make_subprocess_transport(self, protocol, args, shell,
+                                         stdin, stdout, stderr, bufsize,
+                                         extra=None, **kwargs):
         waiter = self.create_future()
         transp = _WindowsSubprocessTransport(self, protocol, args, shell,
                                              stdin, stdout, stderr, bufsize,
                                              waiter=waiter, extra=extra,
                                              **kwargs)
         try:
-            yield from waiter
-        except Exception as exc:
-            # Workaround CPython bug #23353: using yield/yield-from in an
-            # except block of a generator doesn't clear properly sys.exc_info()
-            err = exc
-        else:
-            err = None
-
-        if err is not None:
+            await waiter
+        except Exception:
             transp.close()
-            yield from transp._wait()
-            raise err
+            await transp._wait()
+            raise
 
         return transp
 
@@ -498,11 +487,10 @@ def finish_accept(trans, key, ov):
             conn.settimeout(listener.gettimeout())
             return conn, conn.getpeername()
 
-        @coroutine
-        def accept_coro(future, conn):
+        async def accept_coro(future, conn):
             # Coroutine closing the accept socket if the future is cancelled
             try:
-                yield from future
+                await future
             except futures.CancelledError:
                 conn.close()
                 raise
@@ -552,8 +540,7 @@ def finish_accept_pipe(trans, key, ov):
 
         return self._register(ov, pipe, finish_accept_pipe)
 
-    @coroutine
-    def connect_pipe(self, address):
+    async def connect_pipe(self, address):
         delay = CONNECT_PIPE_INIT_DELAY
         while True:
             # Unfortunately there is no way to do an overlapped connect to a pipe.
@@ -568,7 +555,7 @@ def connect_pipe(self, address):
 
             # ConnectPipe() failed with ERROR_PIPE_BUSY: retry later
             delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
-            yield from tasks.sleep(delay, loop=self._loop)
+            await tasks.sleep(delay, loop=self._loop)
 
         return windows_utils.PipeHandle(handle)
 
diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py
index 98f2aef5627..6561a9df779 100644
--- a/Lib/test/test_asyncio/test_base_events.py
+++ b/Lib/test/test_asyncio/test_base_events.py
@@ -1316,7 +1316,8 @@ def mock_getaddrinfo(*args, **kwds):
 
         self.loop.getaddrinfo.side_effect = mock_getaddrinfo
         self.loop.sock_connect = mock.Mock()
-        self.loop.sock_connect.return_value = ()
+        self.loop.sock_connect.return_value = self.loop.create_future()
+        self.loop.sock_connect.return_value.set_result(None)
         self.loop._make_ssl_transport = mock.Mock()
 
         class _SelectorTransportMock:
@@ -1416,7 +1417,8 @@ def test_create_server_no_host_port_sock(self):
 
     def test_create_server_no_getaddrinfo(self):
         getaddrinfo = self.loop.getaddrinfo = mock.Mock()
-        getaddrinfo.return_value = []
+        getaddrinfo.return_value = self.loop.create_future()
+        getaddrinfo.return_value.set_result(None)
 
         f = self.loop.create_server(MyProto, 'python.org', 0)
         self.assertRaises(OSError, self.loop.run_until_complete, f)
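
The test changes above swap plain mock return values for already-completed futures, because the converted code now awaits sock_connect() and getaddrinfo() instead of yielding from them. A tiny sketch of the same trick in isolation (sock_connect here is just a Mock, not a real loop method):

    import asyncio
    from unittest import mock

    loop = asyncio.new_event_loop()

    sock_connect = mock.Mock()
    fut = loop.create_future()
    fut.set_result(None)                      # pretend the connect already finished
    sock_connect.return_value = fut           # a bare () could not be awaited

    async def use_it():
        return await sock_connect('sock', ('host', 0))

    print(loop.run_until_complete(use_it()))  # -> None
    loop.close()
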
diff --git a/Lib/test/test_asyncio/test_events.py b/Lib/test/test_asyncio/test_events.py
index a6941aa4a60..2e8f46dd41e 100644
--- a/Lib/test/test_asyncio/test_events.py
+++ b/Lib/test/test_asyncio/test_events.py
@@ -285,10 +285,10 @@ def test_run_until_complete(self):
         self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
 
     def test_run_until_complete_stopped(self):
-        @asyncio.coroutine
-        def cb():
+
+        async def cb():
             self.loop.stop()
-            yield from asyncio.sleep(0.1, loop=self.loop)
+            await asyncio.sleep(0.1, loop=self.loop)
         task = cb()
         self.assertRaises(RuntimeError,
                           self.loop.run_until_complete, task)
@@ -1424,9 +1424,8 @@ def test_read_pipe(self):
         rpipe, wpipe = os.pipe()
         pipeobj = io.open(rpipe, 'rb', 1024)
 
-        @asyncio.coroutine
-        def connect():
-            t, p = yield from self.loop.connect_read_pipe(
+        async def connect():
+            t, p = await self.loop.connect_read_pipe(
                 lambda: proto, pipeobj)
             self.assertIs(p, proto)
             self.assertIs(t, proto.transport)
@@ -1463,11 +1462,10 @@ def test_unclosed_pipe_transport(self):
         rpipeobj = io.open(rpipe, 'rb', 1024)
         wpipeobj = io.open(wpipe, 'w', 1024)
 
-        @asyncio.coroutine
-        def connect():
-            read_transport, _ = yield from loop.connect_read_pipe(
+        async def connect():
+            read_transport, _ = await loop.connect_read_pipe(
                 lambda: read_proto, rpipeobj)
-            write_transport, _ = yield from loop.connect_write_pipe(
+            write_transport, _ = await loop.connect_write_pipe(
                 lambda: write_proto, wpipeobj)
             return read_transport, write_transport
 
@@ -1499,10 +1497,9 @@ def test_read_pty_output(self):
         master, slave = os.openpty()
         master_read_obj = io.open(master, 'rb', 0)
 
-        @asyncio.coroutine
-        def connect():
-            t, p = yield from self.loop.connect_read_pipe(lambda: proto,
-                                                          master_read_obj)
+        async def connect():
+            t, p = await self.loop.connect_read_pipe(lambda: proto,
+                                                     master_read_obj)
             self.assertIs(p, proto)
             self.assertIs(t, proto.transport)
             self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
@@ -1713,11 +1710,10 @@ def test_prompt_cancellation(self):
         if ov is not None:
             self.assertTrue(ov.pending)
 
-        @asyncio.coroutine
-        def main():
+        async def main():
             try:
                 self.loop.call_soon(f.cancel)
-                yield from f
+                await f
             except asyncio.CancelledError:
                 res = 'cancelled'
             else:
@@ -1750,14 +1746,13 @@ def _run_once():
         self.loop._run_once_counter = 0
         self.loop._run_once = _run_once
 
-        @asyncio.coroutine
-        def wait():
+        async def wait():
             loop = self.loop
-            yield from asyncio.sleep(1e-2, loop=loop)
-            yield from asyncio.sleep(1e-4, loop=loop)
-            yield from asyncio.sleep(1e-6, loop=loop)
-            yield from asyncio.sleep(1e-8, loop=loop)
-            yield from asyncio.sleep(1e-10, loop=loop)
+            await asyncio.sleep(1e-2, loop=loop)
+            await asyncio.sleep(1e-4, loop=loop)
+            await asyncio.sleep(1e-6, loop=loop)
+            await asyncio.sleep(1e-8, loop=loop)
+            await asyncio.sleep(1e-10, loop=loop)
 
         self.loop.run_until_complete(wait())
         # The ideal number of call is 12, but on some platforms, the selector
@@ -2076,9 +2071,9 @@ def test_subprocess_wait_no_same_group(self):
         self.assertEqual(7, proto.returncode)
 
     def test_subprocess_exec_invalid_args(self):
-        @asyncio.coroutine
-        def connect(**kwds):
-            yield from self.loop.subprocess_exec(
+
+        async def connect(**kwds):
+            await self.loop.subprocess_exec(
                 asyncio.SubprocessProtocol,
                 'pwd', **kwds)
 
@@ -2090,11 +2085,11 @@ def connect(**kwds):
             self.loop.run_until_complete(connect(shell=True))
 
     def test_subprocess_shell_invalid_args(self):
-        @asyncio.coroutine
-        def connect(cmd=None, **kwds):
+
+        async def connect(cmd=None, **kwds):
             if not cmd:
                 cmd = 'pwd'
-            yield from self.loop.subprocess_shell(
+            await self.loop.subprocess_shell(
                 asyncio.SubprocessProtocol,
                 cmd, **kwds)
 
@@ -2548,20 +2543,8 @@ def test_not_implemented(self):
             NotImplementedError, loop.time)
         self.assertRaises(
             NotImplementedError, loop.call_soon_threadsafe, None)
-        self.assertRaises(
-            NotImplementedError, loop.run_in_executor, f, f)
         self.assertRaises(
             NotImplementedError, loop.set_default_executor, f)
-        self.assertRaises(
-            NotImplementedError, loop.getaddrinfo, 'localhost', 8080)
-        self.assertRaises(
-            NotImplementedError, loop.getnameinfo, ('localhost', 8080))
-        self.assertRaises(
-            NotImplementedError, loop.create_connection, f)
-        self.assertRaises(
-            NotImplementedError, loop.create_server, f)
-        self.assertRaises(
-            NotImplementedError, loop.create_datagram_endpoint, f)
         self.assertRaises(
             NotImplementedError, loop.add_reader, 1, f)
         self.assertRaises(
@@ -2570,33 +2553,12 @@ def test_not_implemented(self):
             NotImplementedError, loop.add_writer, 1, f)
         self.assertRaises(
             NotImplementedError, loop.remove_writer, 1)
-        self.assertRaises(
-            NotImplementedError, loop.sock_recv, f, 10)
-        self.assertRaises(
-            NotImplementedError, loop.sock_recv_into, f, 10)
-        self.assertRaises(
-            NotImplementedError, loop.sock_sendall, f, 10)
-        self.assertRaises(
-            NotImplementedError, loop.sock_connect, f, f)
-        self.assertRaises(
-            NotImplementedError, loop.sock_accept, f)
         self.assertRaises(
             NotImplementedError, loop.add_signal_handler, 1, f)
         self.assertRaises(
             NotImplementedError, loop.remove_signal_handler, 1)
         self.assertRaises(
             NotImplementedError, loop.remove_signal_handler, 1)
-        self.assertRaises(
-            NotImplementedError, loop.connect_read_pipe, f,
-            mock.sentinel.pipe)
-        self.assertRaises(
-            NotImplementedError, loop.connect_write_pipe, f,
-            mock.sentinel.pipe)
-        self.assertRaises(
-            NotImplementedError, loop.subprocess_shell, f,
-            mock.sentinel)
-        self.assertRaises(
-            NotImplementedError, loop.subprocess_exec, f)
         self.assertRaises(
             NotImplementedError, loop.set_exception_handler, f)
         self.assertRaises(
@@ -2608,6 +2570,47 @@ def test_not_implemented(self):
         self.assertRaises(
             NotImplementedError, loop.set_debug, f)
 
+    def test_not_implemented_async(self):
+
+        async def inner():
+            f = mock.Mock()
+            loop = asyncio.AbstractEventLoop()
+
+            with self.assertRaises(NotImplementedError):
+                await loop.run_in_executor(f, f)
+            with self.assertRaises(NotImplementedError):
+                await loop.getaddrinfo('localhost', 8080)
+            with self.assertRaises(NotImplementedError):
+                await loop.getnameinfo(('localhost', 8080))
+            with self.assertRaises(NotImplementedError):
+                await loop.create_connection(f)
+            with self.assertRaises(NotImplementedError):
+                await loop.create_server(f)
+            with self.assertRaises(NotImplementedError):
+                await loop.create_datagram_endpoint(f)
+            with self.assertRaises(NotImplementedError):
+                await loop.sock_recv(f, 10)
+            with self.assertRaises(NotImplementedError):
+                await loop.sock_recv_into(f, 10)
+            with self.assertRaises(NotImplementedError):
+                await loop.sock_sendall(f, 10)
+            with self.assertRaises(NotImplementedError):
+                await loop.sock_connect(f, f)
+            with self.assertRaises(NotImplementedError):
+                await loop.sock_accept(f)
+            with self.assertRaises(NotImplementedError):
+                await loop.connect_read_pipe(f, mock.sentinel.pipe)
+            with self.assertRaises(NotImplementedError):
+                await loop.connect_write_pipe(f, mock.sentinel.pipe)
+            with self.assertRaises(NotImplementedError):
+                await loop.subprocess_shell(f, mock.sentinel)
+            with self.assertRaises(NotImplementedError):
+                await loop.subprocess_exec(f)
+
+        loop = asyncio.new_event_loop()
+        loop.run_until_complete(inner())
+        loop.close()
+
 
 class ProtocolsAbsTests(unittest.TestCase):
 
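
test_not_implemented_async above awaits the abstract loop methods instead of wrapping them in assertRaises, because once they are async def, merely calling them only creates a coroutine object; NotImplementedError surfaces when that coroutine is awaited. A short sketch of the difference (the variable names are illustrative):

    import asyncio

    abstract = asyncio.AbstractEventLoop()
    coro = abstract.getaddrinfo('localhost', 8080)   # no error yet
    print(type(coro).__name__)                       # -> coroutine

    runner = asyncio.new_event_loop()
    try:
        runner.run_until_complete(coro)              # NotImplementedError raised here
    except NotImplementedError:
        print('raised on await')
    finally:
        runner.close()
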
diff --git a/Lib/test/test_asyncio/test_locks.py b/Lib/test/test_asyncio/test_locks.py
index c85e8b1a32f..c1f8d6e3673 100644
--- a/Lib/test/test_asyncio/test_locks.py
+++ b/Lib/test/test_asyncio/test_locks.py
@@ -69,21 +69,18 @@ def test_acquire(self):
 
         self.assertTrue(self.loop.run_until_complete(lock.acquire()))
 
-        @asyncio.coroutine
-        def c1(result):
-            if (yield from lock.acquire()):
+        async def c1(result):
+            if await lock.acquire():
                 result.append(1)
             return True
 
-        @asyncio.coroutine
-        def c2(result):
-            if (yield from lock.acquire()):
+        async def c2(result):
+            if await lock.acquire():
                 result.append(2)
             return True
 
-        @asyncio.coroutine
-        def c3(result):
-            if (yield from lock.acquire()):
+        async def c3(result):
+            if await lock.acquire():
                 result.append(3)
             return True
 
@@ -145,12 +142,11 @@ def test_cancel_race(self):
         # Setup: A has the lock, b and c are waiting.
         lock = asyncio.Lock(loop=self.loop)
 
-        @asyncio.coroutine
-        def lockit(name, blocker):
-            yield from lock.acquire()
+        async def lockit(name, blocker):
+            await lock.acquire()
             try:
                 if blocker is not None:
-                    yield from blocker
+                    await blocker
             finally:
                 lock.release()
 
@@ -294,19 +290,16 @@ def test_wait(self):
 
         result = []
 
-        @asyncio.coroutine
-        def c1(result):
-            if (yield from ev.wait()):
+        async def c1(result):
+            if await ev.wait():
                 result.append(1)
 
-        @asyncio.coroutine
-        def c2(result):
-            if (yield from ev.wait()):
+        async def c2(result):
+            if await ev.wait():
                 result.append(2)
 
-        @asyncio.coroutine
-        def c3(result):
-            if (yield from ev.wait()):
+        async def c3(result):
+            if await ev.wait():
                 result.append(3)
 
         t1 = asyncio.Task(c1(result), loop=self.loop)
@@ -359,9 +352,8 @@ def test_clear_with_waiters(self):
         ev = asyncio.Event(loop=self.loop)
         result = []
 
-        @asyncio.coroutine
-        def c1(result):
-            if (yield from ev.wait()):
+        async def c1(result):
+            if await ev.wait():
                 result.append(1)
             return True
 
@@ -408,24 +400,21 @@ def test_wait(self):
         cond = asyncio.Condition(loop=self.loop)
         result = []
 
-        @asyncio.coroutine
-        def c1(result):
-            yield from cond.acquire()
-            if (yield from cond.wait()):
+        async def c1(result):
+            await cond.acquire()
+            if await cond.wait():
                 result.append(1)
             return True
 
-        @asyncio.coroutine
-        def c2(result):
-            yield from cond.acquire()
-            if (yield from cond.wait()):
+        async def c2(result):
+            await cond.acquire()
+            if await cond.wait():
                 result.append(2)
             return True
 
-        @asyncio.coroutine
-        def c3(result):
-            yield from cond.acquire()
-            if (yield from cond.wait()):
+        async def c3(result):
+            await cond.acquire()
+            if await cond.wait():
                 result.append(3)
             return True
 
@@ -522,10 +511,9 @@ def predicate():
 
         result = []
 
-        @asyncio.coroutine
-        def c1(result):
-            yield from cond.acquire()
-            if (yield from cond.wait_for(predicate)):
+        async def c1(result):
+            await cond.acquire()
+            if await cond.wait_for(predicate):
                 result.append(1)
                 cond.release()
             return True
@@ -567,26 +555,23 @@ def test_notify(self):
         cond = asyncio.Condition(loop=self.loop)
         result = []
 
-        @asyncio.coroutine
-        def c1(result):
-            yield from cond.acquire()
-            if (yield from cond.wait()):
+        async def c1(result):
+            await cond.acquire()
+            if await cond.wait():
                 result.append(1)
                 cond.release()
             return True
 
-        @asyncio.coroutine
-        def c2(result):
-            yield from cond.acquire()
-            if (yield from cond.wait()):
+        async def c2(result):
+            await cond.acquire()
+            if await cond.wait():
                 result.append(2)
                 cond.release()
             return True
 
-        @asyncio.coroutine
-        def c3(result):
-            yield from cond.acquire()
-            if (yield from cond.wait()):
+        async def c3(result):
+            await cond.acquire()
+            if await cond.wait():
                 result.append(3)
                 cond.release()
             return True
@@ -623,18 +608,16 @@ def test_notify_all(self):
 
         result = []
 
-        @asyncio.coroutine
-        def c1(result):
-            yield from cond.acquire()
-            if (yield from cond.wait()):
+        async def c1(result):
+            await cond.acquire()
+            if await cond.wait():
                 result.append(1)
                 cond.release()
             return True
 
-        @asyncio.coroutine
-        def c2(result):
-            yield from cond.acquire()
-            if (yield from cond.wait()):
+        async def c2(result):
+            await cond.acquire()
+            if await cond.wait():
                 result.append(2)
                 cond.release()
             return True
@@ -791,27 +774,23 @@ def test_acquire(self):
         self.assertTrue(self.loop.run_until_complete(sem.acquire()))
         self.assertFalse(sem.locked())
 
-        @asyncio.coroutine
-        def c1(result):
-            yield from sem.acquire()
+        async def c1(result):
+            await sem.acquire()
             result.append(1)
             return True
 
-        @asyncio.coroutine
-        def c2(result):
-            yield from sem.acquire()
+        async def c2(result):
+            await sem.acquire()
             result.append(2)
             return True
 
-        @asyncio.coroutine
-        def c3(result):
-            yield from sem.acquire()
+        async def c3(result):
+            await sem.acquire()
             result.append(3)
             return True
 
-        @asyncio.coroutine
-        def c4(result):
-            yield from sem.acquire()
+        async def c4(result):
+            await sem.acquire()
             result.append(4)
             return True
 
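
The lock tests above all follow the same converted shape: await the acquire, do the protected work, release in a finally block. A compact sketch of that pattern outside the test harness (worker() and the result list are illustrative; loop= is passed explicitly, matching the tests above, and asyncio.Lock also supports async with as shorthand for the same pairing):

    import asyncio

    async def worker(lock, result, name):
        await lock.acquire()
        try:
            result.append(name)               # critical section
            await asyncio.sleep(0)
        finally:
            lock.release()

    loop = asyncio.new_event_loop()
    lock = asyncio.Lock(loop=loop)
    result = []
    t1 = loop.create_task(worker(lock, result, 1))
    t2 = loop.create_task(worker(lock, result, 2))
    loop.run_until_complete(asyncio.gather(t1, t2))
    print(result)                             # -> [1, 2]
    loop.close()
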
diff --git a/Lib/test/test_asyncio/test_queues.py b/Lib/test/test_asyncio/test_queues.py
index 2137cde6f48..3b66d616f8e 100644
--- a/Lib/test/test_asyncio/test_queues.py
+++ b/Lib/test/test_asyncio/test_queues.py
@@ -36,27 +36,25 @@ def gen():
         id_is_present = hex(id(q)) in fn(q)
         self.assertEqual(expect_id, id_is_present)
 
-        @asyncio.coroutine
-        def add_getter():
+        async def add_getter():
             q = asyncio.Queue(loop=loop)
             # Start a task that waits to get.
             asyncio.Task(q.get(), loop=loop)
             # Let it start waiting.
-            yield from asyncio.sleep(0.1, loop=loop)
+            await asyncio.sleep(0.1, loop=loop)
             self.assertTrue('_getters[1]' in fn(q))
             # resume q.get coroutine to finish generator
             q.put_nowait(0)
 
         loop.run_until_complete(add_getter())
 
-        @asyncio.coroutine
-        def add_putter():
+        async def add_putter():
             q = asyncio.Queue(maxsize=1, loop=loop)
             q.put_nowait(1)
             # Start a task that waits to put.
             asyncio.Task(q.put(2), loop=loop)
             # Let it start waiting.
-            yield from asyncio.sleep(0.1, loop=loop)
+            await asyncio.sleep(0.1, loop=loop)
             self.assertTrue('_putters[1]' in fn(q))
             # resume q.put coroutine to finish generator
             q.get_nowait()
@@ -125,24 +123,22 @@ def gen():
         self.assertEqual(2, q.maxsize)
         have_been_put = []
 
-        @asyncio.coroutine
-        def putter():
+        async def putter():
             for i in range(3):
-                yield from q.put(i)
+                await q.put(i)
                 have_been_put.append(i)
             return True
 
-        @asyncio.coroutine
-        def test():
+        async def test():
             t = asyncio.Task(putter(), loop=loop)
-            yield from asyncio.sleep(0.01, loop=loop)
+            await asyncio.sleep(0.01, loop=loop)
 
             # The putter is blocked after putting two items.
             self.assertEqual([0, 1], have_been_put)
             self.assertEqual(0, q.get_nowait())
 
             # Let the putter resume and put last item.
-            yield from asyncio.sleep(0.01, loop=loop)
+            await asyncio.sleep(0.01, loop=loop)
             self.assertEqual([0, 1, 2], have_been_put)
             self.assertEqual(1, q.get_nowait())
             self.assertEqual(2, q.get_nowait())
@@ -160,9 +156,8 @@ def test_blocking_get(self):
         q = asyncio.Queue(loop=self.loop)
         q.put_nowait(1)
 
-        @asyncio.coroutine
-        def queue_get():
-            return (yield from q.get())
+        async def queue_get():
+            return await q.get()
 
         res = self.loop.run_until_complete(queue_get())
         self.assertEqual(1, res)
@@ -192,21 +187,19 @@ def gen():
         started = asyncio.Event(loop=loop)
         finished = False
 
-        @asyncio.coroutine
-        def queue_get():
+        async def queue_get():
             nonlocal finished
             started.set()
-            res = yield from q.get()
+            res = await q.get()
             finished = True
             return res
 
-        @asyncio.coroutine
-        def queue_put():
+        async def queue_put():
             loop.call_later(0.01, q.put_nowait, 1)
             queue_get_task = asyncio.Task(queue_get(), loop=loop)
-            yield from started.wait()
+            await started.wait()
             self.assertFalse(finished)
-            res = yield from queue_get_task
+            res = await queue_get_task
             self.assertTrue(finished)
             return res
 
@@ -236,16 +229,14 @@ def gen():
 
         q = asyncio.Queue(loop=loop)
 
-        @asyncio.coroutine
-        def queue_get():
-            return (yield from asyncio.wait_for(q.get(), 0.051, loop=loop))
+        async def queue_get():
+            return await asyncio.wait_for(q.get(), 0.051, loop=loop)
 
-        @asyncio.coroutine
-        def test():
+        async def test():
             get_task = asyncio.Task(queue_get(), loop=loop)
-            yield from asyncio.sleep(0.01, loop=loop)  # let the task start
+            await asyncio.sleep(0.01, loop=loop)  # let the task start
             q.put_nowait(1)
-            return (yield from get_task)
+            return await get_task
 
         self.assertEqual(1, loop.run_until_complete(test()))
         self.assertAlmostEqual(0.06, loop.time())
@@ -275,15 +266,13 @@ def test_get_with_waiting_putters(self):
     def test_why_are_getters_waiting(self):
         # From issue #268.
 
-        @asyncio.coroutine
-        def consumer(queue, num_expected):
+        async def consumer(queue, num_expected):
             for _ in range(num_expected):
-                yield from queue.get()
+                await queue.get()
 
-        @asyncio.coroutine
-        def producer(queue, num_items):
+        async def producer(queue, num_items):
             for i in range(num_items):
-                yield from queue.put(i)
+                await queue.put(i)
 
         queue_size = 1
         producer_num_items = 5
@@ -301,10 +290,10 @@ def a_generator():
             yield 0.2
 
         self.loop = self.new_test_loop(a_generator)
-        @asyncio.coroutine
-        def consumer(queue):
+
+        async def consumer(queue):
             try:
-                item = yield from asyncio.wait_for(queue.get(), 0.1, loop=self.loop)
+                item = await asyncio.wait_for(queue.get(), 0.1, loop=self.loop)
             except asyncio.TimeoutError:
                 pass
 
@@ -318,10 +307,9 @@ class QueuePutTests(_QueueTestBase):
     def test_blocking_put(self):
         q = asyncio.Queue(loop=self.loop)
 
-        @asyncio.coroutine
-        def queue_put():
+        async def queue_put():
             # No maxsize, won't block.
-            yield from q.put(1)
+            await q.put(1)
 
         self.loop.run_until_complete(queue_put())
 
@@ -338,21 +326,19 @@ def gen():
         started = asyncio.Event(loop=loop)
         finished = False
 
-        @asyncio.coroutine
-        def queue_put():
+        async def queue_put():
             nonlocal finished
             started.set()
-            yield from q.put(1)
-            yield from q.put(2)
+            await q.put(1)
+            await q.put(2)
             finished = True
 
-        @asyncio.coroutine
-        def queue_get():
+        async def queue_get():
             loop.call_later(0.01, q.get_nowait)
             queue_put_task = asyncio.Task(queue_put(), loop=loop)
-            yield from started.wait()
+            await started.wait()
             self.assertFalse(finished)
-            yield from queue_put_task
+            await queue_put_task
             self.assertTrue(finished)
 
         loop.run_until_complete(queue_get())
@@ -464,24 +450,22 @@ def test_float_maxsize(self):
         self.assertRaises(asyncio.QueueFull, q.put_nowait, 3)
 
         q = asyncio.Queue(maxsize=1.3, loop=self.loop)
-        @asyncio.coroutine
-        def queue_put():
-            yield from q.put(1)
-            yield from q.put(2)
+
+        async def queue_put():
+            await q.put(1)
+            await q.put(2)
             self.assertTrue(q.full())
         self.loop.run_until_complete(queue_put())
 
     def test_put_cancelled(self):
         q = asyncio.Queue(loop=self.loop)
 
-        @asyncio.coroutine
-        def queue_put():
-            yield from q.put(1)
+        async def queue_put():
+            await q.put(1)
             return True
 
-        @asyncio.coroutine
-        def test():
-            return (yield from q.get())
+        async def test():
+            return await q.get()
 
         t = asyncio.Task(queue_put(), loop=self.loop)
         self.assertEqual(1, self.loop.run_until_complete(test()))
@@ -520,13 +504,11 @@ def test_why_are_putters_waiting(self):
 
         queue = asyncio.Queue(2, loop=self.loop)
 
-        @asyncio.coroutine
-        def putter(item):
-            yield from queue.put(item)
+        async def putter(item):
+            await queue.put(item)
 
-        @asyncio.coroutine
-        def getter():
-            yield
+        async def getter():
+            await asyncio.sleep(0, loop=self.loop)
             num = queue.qsize()
             for _ in range(num):
                 item = queue.get_nowait()
@@ -580,21 +562,19 @@ def test_task_done(self):
         # Join the queue and assert all items have been processed.
         running = True
 
-        @asyncio.coroutine
-        def worker():
+        async def worker():
             nonlocal accumulator
 
             while running:
-                item = yield from q.get()
+                item = await q.get()
                 accumulator += item
                 q.task_done()
 
-        @asyncio.coroutine
-        def test():
+        async def test():
             tasks = [asyncio.Task(worker(), loop=self.loop)
                      for index in range(2)]
 
-            yield from q.join()
+            await q.join()
             return tasks
 
         tasks = self.loop.run_until_complete(test())
@@ -612,10 +592,9 @@ def test_join_empty_queue(self):
         # Test that a queue join()s successfully, and before anything else
         # (done twice for insurance).
 
-        @asyncio.coroutine
-        def join():
-            yield from q.join()
-            yield from q.join()
+        async def join():
+            await q.join()
+            await q.join()
 
         self.loop.run_until_complete(join())
 
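
The getter() change above replaces a bare yield with await asyncio.sleep(0, ...), since a bare yield is a syntax error inside async def; sleeping for zero seconds is the async/await way to give other tasks one run of the loop. A tiny sketch of that effect (spin() and the order list are illustrative):

    import asyncio

    order = []

    async def spin(name):
        order.append(name)
        await asyncio.sleep(0)                # yield control for one loop cycle
        order.append(name + '-again')

    loop = asyncio.new_event_loop()
    t1 = loop.create_task(spin('a'))
    t2 = loop.create_task(spin('b'))
    loop.run_until_complete(asyncio.gather(t1, t2))
    print(order)   # -> ['a', 'b', 'a-again', 'b-again'] with the default loop
    loop.close()
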
diff --git a/Lib/test/test_asyncio/test_streams.py b/Lib/test/test_asyncio/test_streams.py
index a1e5bd7fab6..2f4e6d23bf9 100644
--- a/Lib/test/test_asyncio/test_streams.py
+++ b/Lib/test/test_asyncio/test_streams.py
@@ -571,11 +571,10 @@ def __init__(self, loop):
                 self.server = None
                 self.loop = loop
 
-            @asyncio.coroutine
-            def handle_client(self, client_reader, client_writer):
-                data = yield from client_reader.readline()
+            async def handle_client(self, client_reader, client_writer):
+                data = await client_reader.readline()
                 client_writer.write(data)
-                yield from client_writer.drain()
+                await client_writer.drain()
                 client_writer.close()
 
             def start(self):
@@ -608,14 +607,13 @@ def stop(self):
                     self.loop.run_until_complete(self.server.wait_closed())
                     self.server = None
 
-        @asyncio.coroutine
-        def client(addr):
-            reader, writer = yield from asyncio.open_connection(
+        async def client(addr):
+            reader, writer = await asyncio.open_connection(
                 *addr, loop=self.loop)
             # send a line
             writer.write(b"hello world!\n")
             # read it back
-            msgback = yield from reader.readline()
+            msgback = await reader.readline()
             writer.close()
             return msgback
 
@@ -645,11 +643,10 @@ def __init__(self, loop, path):
                 self.loop = loop
                 self.path = path
 
-            @asyncio.coroutine
-            def handle_client(self, client_reader, client_writer):
-                data = yield from client_reader.readline()
+            async def handle_client(self, client_reader, client_writer):
+                data = await client_reader.readline()
                 client_writer.write(data)
-                yield from client_writer.drain()
+                await client_writer.drain()
                 client_writer.close()
 
             def start(self):
@@ -674,14 +671,13 @@ def stop(self):
                     self.loop.run_until_complete(self.server.wait_closed())
                     self.server = None
 
-        @asyncio.coroutine
-        def client(path):
-            reader, writer = yield from asyncio.open_unix_connection(
+        async def client(path):
+            reader, writer = await asyncio.open_unix_connection(
                 path, loop=self.loop)
             # send a line
             writer.write(b"hello world!\n")
             # read it back
-            msgback = yield from reader.readline()
+            msgback = await reader.readline()
             writer.close()
             return msgback
 
@@ -782,14 +778,13 @@ def server():
                 clt, _ = sock.accept()
                 clt.close()
 
-        @asyncio.coroutine
-        def client(host, port):
-            reader, writer = yield from asyncio.open_connection(
+        async def client(host, port):
+            reader, writer = await asyncio.open_connection(
                 host, port, loop=self.loop)
 
             while True:
                 writer.write(b"foo\n")
-                yield from writer.drain()
+                await writer.drain()
 
         # Start the server thread and wait for it to be listening.
         thread = threading.Thread(target=server)
diff --git a/Lib/test/test_asyncio/test_subprocess.py b/Lib/test/test_asyncio/test_subprocess.py
index e8822c36698..ad4bb149559 100644
--- a/Lib/test/test_asyncio/test_subprocess.py
+++ b/Lib/test/test_asyncio/test_subprocess.py
@@ -81,9 +81,8 @@ class SubprocessMixin:
     def test_stdin_stdout(self):
         args = PROGRAM_CAT
 
-        @asyncio.coroutine
-        def run(data):
-            proc = yield from asyncio.create_subprocess_exec(
+        async def run(data):
+            proc = await asyncio.create_subprocess_exec(
                                           *args,
                                           stdin=subprocess.PIPE,
                                           stdout=subprocess.PIPE,
@@ -91,12 +90,12 @@ def run(data):
 
             # feed data
             proc.stdin.write(data)
-            yield from proc.stdin.drain()
+            await proc.stdin.drain()
             proc.stdin.close()
 
             # get output and exitcode
-            data = yield from proc.stdout.read()
-            exitcode = yield from proc.wait()
+            data = await proc.stdout.read()
+            exitcode = await proc.wait()
             return (exitcode, data)
 
         task = run(b'some data')
@@ -108,14 +107,13 @@ def run(data):
     def test_communicate(self):
         args = PROGRAM_CAT
 
-        @asyncio.coroutine
-        def run(data):
-            proc = yield from asyncio.create_subprocess_exec(
+        async def run(data):
+            proc = await asyncio.create_subprocess_exec(
                                           *args,
                                           stdin=subprocess.PIPE,
                                           stdout=subprocess.PIPE,
                                           loop=self.loop)
-            stdout, stderr = yield from proc.communicate(data)
+            stdout, stderr = await proc.communicate(data)
             return proc.returncode, stdout
 
         task = run(b'some data')
@@ -178,14 +176,13 @@ def test_send_signal(self):
                                                     loop=self.loop)
             proc = self.loop.run_until_complete(create)
 
-            @asyncio.coroutine
-            def send_signal(proc):
+            async def send_signal(proc):
                 # basic synchronization to wait until the program is sleeping
-                line = yield from proc.stdout.readline()
+                line = await proc.stdout.readline()
                 self.assertEqual(line, b'sleeping\n')
 
                 proc.send_signal(signal.SIGHUP)
-                returncode = (yield from proc.wait())
+                returncode = await proc.wait()
                 return returncode
 
             returncode = self.loop.run_until_complete(send_signal(proc))
@@ -208,10 +205,9 @@ def prepare_broken_pipe_test(self):
     def test_stdin_broken_pipe(self):
         proc, large_data = self.prepare_broken_pipe_test()
 
-        @asyncio.coroutine
-        def write_stdin(proc, data):
+        async def write_stdin(proc, data):
             proc.stdin.write(data)
-            yield from proc.stdin.drain()
+            await proc.stdin.drain()
 
         coro = write_stdin(proc, large_data)
         # drain() must raise BrokenPipeError or ConnectionResetError
@@ -232,8 +228,7 @@ def test_pause_reading(self):
         limit = 10
         size = (limit * 2 + 1)
 
-        @asyncio.coroutine
-        def test_pause_reading():
+        async def test_pause_reading():
             code = '\n'.join((
                 'import sys',
                 'sys.stdout.write("x" * %s)' % size,
@@ -242,16 +237,15 @@ def test_pause_reading():
 
             connect_read_pipe = self.loop.connect_read_pipe
 
-            @asyncio.coroutine
-            def connect_read_pipe_mock(*args, **kw):
-                transport, protocol = yield from connect_read_pipe(*args, **kw)
+            async def connect_read_pipe_mock(*args, **kw):
+                transport, protocol = await connect_read_pipe(*args, **kw)
                 transport.pause_reading = mock.Mock()
                 transport.resume_reading = mock.Mock()
                 return (transport, protocol)
 
             self.loop.connect_read_pipe = connect_read_pipe_mock
 
-            proc = yield from asyncio.create_subprocess_exec(
+            proc = await asyncio.create_subprocess_exec(
                                          sys.executable, '-c', code,
                                          stdin=asyncio.subprocess.PIPE,
                                          stdout=asyncio.subprocess.PIPE,
@@ -259,7 +253,7 @@ def connect_read_pipe_mock(*args, **kw):
                                          loop=self.loop)
             stdout_transport = proc._transport.get_pipe_transport(1)
 
-            stdout, stderr = yield from proc.communicate()
+            stdout, stderr = await proc.communicate()
 
             # The child process produced more than limit bytes of output,
             # the stream reader transport should pause the protocol to not
@@ -277,18 +271,17 @@ def connect_read_pipe_mock(*args, **kw):
     def test_stdin_not_inheritable(self):
         # asyncio issue #209: stdin must not be inheritable, otherwise
         # the Process.communicate() hangs
-        @asyncio.coroutine
-        def len_message(message):
+        async def len_message(message):
             code = 'import sys; data = sys.stdin.read(); print(len(data))'
-            proc = yield from asyncio.create_subprocess_exec(
+            proc = await asyncio.create_subprocess_exec(
                                           sys.executable, '-c', code,
                                           stdin=asyncio.subprocess.PIPE,
                                           stdout=asyncio.subprocess.PIPE,
                                           stderr=asyncio.subprocess.PIPE,
                                           close_fds=False,
                                           loop=self.loop)
-            stdout, stderr = yield from proc.communicate(message)
-            exitcode = yield from proc.wait()
+            stdout, stderr = await proc.communicate(message)
+            exitcode = await proc.wait()
             return (stdout, exitcode)
 
         output, exitcode = self.loop.run_until_complete(len_message(b'abc'))
@@ -296,18 +289,18 @@ def len_message(message):
         self.assertEqual(exitcode, 0)
 
     def test_empty_input(self):
-        @asyncio.coroutine
-        def empty_input():
+
+        async def empty_input():
             code = 'import sys; data = sys.stdin.read(); print(len(data))'
-            proc = yield from asyncio.create_subprocess_exec(
+            proc = await asyncio.create_subprocess_exec(
                                           sys.executable, '-c', code,
                                           stdin=asyncio.subprocess.PIPE,
                                           stdout=asyncio.subprocess.PIPE,
                                           stderr=asyncio.subprocess.PIPE,
                                           close_fds=False,
                                           loop=self.loop)
-            stdout, stderr = yield from proc.communicate(b'')
-            exitcode = yield from proc.wait()
+            stdout, stderr = await proc.communicate(b'')
+            exitcode = await proc.wait()
             return (stdout, exitcode)
 
         output, exitcode = self.loop.run_until_complete(empty_input())
@@ -317,9 +310,8 @@ def empty_input():
     def test_cancel_process_wait(self):
         # Issue #23140: cancel Process.wait()
 
-        @asyncio.coroutine
-        def cancel_wait():
-            proc = yield from asyncio.create_subprocess_exec(
+        async def cancel_wait():
+            proc = await asyncio.create_subprocess_exec(
                                           *PROGRAM_BLOCKED,
                                           loop=self.loop)
 
@@ -327,7 +319,7 @@ def cancel_wait():
             task = self.loop.create_task(proc.wait())
             self.loop.call_soon(task.cancel)
             try:
-                yield from task
+                await task
             except asyncio.CancelledError:
                 pass
 
@@ -336,20 +328,20 @@ def cancel_wait():
 
             # Kill the process and wait until it is done
             proc.kill()
-            yield from proc.wait()
+            await proc.wait()
 
         self.loop.run_until_complete(cancel_wait())
 
     def test_cancel_make_subprocess_transport_exec(self):
-        @asyncio.coroutine
-        def cancel_make_transport():
+
+        async def cancel_make_transport():
             coro = asyncio.create_subprocess_exec(*PROGRAM_BLOCKED,
                                                   loop=self.loop)
             task = self.loop.create_task(coro)
 
             self.loop.call_soon(task.cancel)
             try:
-                yield from task
+                await task
             except asyncio.CancelledError:
                 pass
 
@@ -359,15 +351,15 @@ def cancel_make_transport():
             self.loop.run_until_complete(cancel_make_transport())
 
     def test_cancel_post_init(self):
-        @asyncio.coroutine
-        def cancel_make_transport():
+
+        async def cancel_make_transport():
             coro = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
                                              *PROGRAM_BLOCKED)
             task = self.loop.create_task(coro)
 
             self.loop.call_soon(task.cancel)
             try:
-                yield from task
+                await task
             except asyncio.CancelledError:
                 pass
 
@@ -378,11 +370,11 @@ def cancel_make_transport():
             test_utils.run_briefly(self.loop)
 
     def test_close_kill_running(self):
-        @asyncio.coroutine
-        def kill_running():
+
+        async def kill_running():
             create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
                                                *PROGRAM_BLOCKED)
-            transport, protocol = yield from create
+            transport, protocol = await create
 
             kill_called = False
             def kill():
@@ -395,7 +387,7 @@ def kill():
             proc.kill = kill
             returncode = transport.get_returncode()
             transport.close()
-            yield from transport._wait()
+            await transport._wait()
             return (returncode, kill_called)
 
         # Ignore "Close running child process: kill ..." log
@@ -408,11 +400,11 @@ def kill():
         test_utils.run_briefly(self.loop)
 
     def test_close_dont_kill_finished(self):
-        @asyncio.coroutine
-        def kill_running():
+
+        async def kill_running():
             create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
                                                *PROGRAM_BLOCKED)
-            transport, protocol = yield from create
+            transport, protocol = await create
             proc = transport.get_extra_info('subprocess')
 
             # kill the process (but asyncio is not notified immediately)
@@ -444,8 +436,8 @@ def kill_running():
         # Unlike SafeChildWatcher, FastChildWatcher does not pop the
         # callbacks if waitpid() is called elsewhere. Let's clear them
         # manually to avoid a warning when the watcher is detached.
-        if sys.platform != 'win32' and \
-           isinstance(self, SubprocessFastWatcherTests):
+        if (sys.platform != 'win32' and
+                isinstance(self, SubprocessFastWatcherTests)):
             asyncio.get_child_watcher()._callbacks.clear()
 
     def test_popen_error(self):
@@ -467,8 +459,8 @@ def test_popen_error(self):
                 self.assertEqual(warns, [])
 
     def test_read_stdout_after_process_exit(self):
-        @asyncio.coroutine
-        def execute():
+
+        async def execute():
             code = '\n'.join(['import sys',
                               'for _ in range(64):',
                               '    sys.stdout.write("x" * 4096)',
@@ -480,11 +472,11 @@ def execute():
                 stdout=asyncio.subprocess.PIPE,
                 loop=self.loop)
 
-            process = yield from fut
+            process = await fut
             while True:
-                data = yield from process.stdout.read(65536)
+                data = await process.stdout.read(65536)
                 if data:
-                    yield from asyncio.sleep(0.3, loop=self.loop)
+                    await asyncio.sleep(0.3, loop=self.loop)
                 else:
                     break
 
diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py
index f66f7f1e170..cdc882a9bee 100644
--- a/Lib/test/test_asyncio/test_tasks.py
+++ b/Lib/test/test_asyncio/test_tasks.py
@@ -31,10 +31,6 @@
         from asyncio.test_support import assert_python_ok
 
 
-PY34 = (sys.version_info >= (3, 4))
-PY35 = (sys.version_info >= (3, 5))
-
-
 @asyncio.coroutine
 def coroutine_function():
     pass
@@ -110,9 +106,8 @@ def test_other_loop_future(self):
         other_loop = asyncio.new_event_loop()
         fut = self.new_future(other_loop)
 
-        @asyncio.coroutine
-        def run(fut):
-            yield from fut
+        async def run(fut):
+            await fut
 
         try:
             with self.assertRaisesRegex(RuntimeError,
@@ -122,9 +117,9 @@ def run(fut):
             other_loop.close()
 
     def test_task_awaits_on_itself(self):
-        @asyncio.coroutine
-        def test():
-            yield from task
+
+        async def test():
+            await task
 
         task = asyncio.ensure_future(test(), loop=self.loop)
 
@@ -209,7 +204,6 @@ def notmuch():
         t = asyncio.ensure_future(t_orig, loop=self.loop)
         self.assertIs(t, t_orig)
 
-    @unittest.skipUnless(PY35, 'need python 3.5 or later')
     def test_ensure_future_awaitable(self):
         class Aw:
             def __init__(self, coro):
@@ -234,12 +228,10 @@ def test_ensure_future_neither(self):
     def test_get_stack(self):
         T = None
 
-        @asyncio.coroutine
-        def foo():
-            yield from bar()
+        async def foo():
+            await bar()
 
-        @asyncio.coroutine
-        def bar():
+        async def bar():
             # test get_stack()
             f = T.get_stack(limit=1)
             try:
@@ -254,11 +246,10 @@ def bar():
             tb = file.read()
             self.assertRegex(tb, r'foo\(\) running')
 
-        @asyncio.coroutine
-        def runner():
+        async def runner():
             nonlocal T
             T = asyncio.ensure_future(foo(), loop=self.loop)
-            yield from T
+            await T
 
         self.loop.run_until_complete(runner())
 
@@ -272,9 +263,8 @@ def notmuch():
 
         # test coroutine function
         self.assertEqual(notmuch.__name__, 'notmuch')
-        if PY35:
-            self.assertRegex(notmuch.__qualname__,
-                             r'\w+.test_task_repr.<locals>.notmuch')
+        self.assertRegex(notmuch.__qualname__,
+                         r'\w+.test_task_repr.<locals>.notmuch')
         self.assertEqual(notmuch.__module__, __name__)
 
         filename, lineno = test_utils.get_function_source(notmuch)
@@ -282,14 +272,9 @@ def notmuch():
 
         # test coroutine object
         gen = notmuch()
-        if coroutines._DEBUG or PY35:
-            coro_qualname = 'BaseTaskTests.test_task_repr.<locals>.notmuch'
-        else:
-            coro_qualname = 'notmuch'
+        coro_qualname = 'BaseTaskTests.test_task_repr.<locals>.notmuch'
         self.assertEqual(gen.__name__, 'notmuch')
-        if PY35:
-            self.assertEqual(gen.__qualname__,
-                             coro_qualname)
+        self.assertEqual(gen.__qualname__, coro_qualname)
 
         # test pending Task
         t = self.new_task(self.loop, gen)
@@ -332,28 +317,21 @@ def notmuch():
 
         # test coroutine function
         self.assertEqual(notmuch.__name__, 'notmuch')
-        if PY35:
-            self.assertRegex(notmuch.__qualname__,
-                             r'\w+.test_task_repr_coro_decorator'
-                             r'\.<locals>\.notmuch')
+        self.assertRegex(notmuch.__qualname__,
+                         r'\w+.test_task_repr_coro_decorator'
+                         r'\.<locals>\.notmuch')
         self.assertEqual(notmuch.__module__, __name__)
 
         # test coroutine object
         gen = notmuch()
-        if coroutines._DEBUG or PY35:
-            # On Python >= 3.5, generators now inherit the name of the
-            # function, as expected, and have a qualified name (__qualname__
-            # attribute).
-            coro_name = 'notmuch'
-            coro_qualname = ('BaseTaskTests.test_task_repr_coro_decorator'
-                             '.<locals>.notmuch')
-        else:
-            # On Python < 3.5, generators inherit the name of the code, not of
-            # the function. See: http://bugs.python.org/issue21205
-            coro_name = coro_qualname = 'coro'
+        # On Python >= 3.5, generators now inherit the name of the
+        # function, as expected, and have a qualified name (__qualname__
+        # attribute).
+        coro_name = 'notmuch'
+        coro_qualname = ('BaseTaskTests.test_task_repr_coro_decorator'
+                         '.<locals>.notmuch')
         self.assertEqual(gen.__name__, coro_name)
-        if PY35:
-            self.assertEqual(gen.__qualname__, coro_qualname)
+        self.assertEqual(gen.__qualname__, coro_qualname)
 
         # test repr(CoroWrapper)
         if coroutines._DEBUG:
@@ -392,9 +370,8 @@ def notmuch():
     def test_task_repr_wait_for(self):
         self.loop.set_debug(False)
 
-        @asyncio.coroutine
-        def wait_for(fut):
-            return (yield from fut)
+        async def wait_for(fut):
+            return await fut
 
         fut = self.new_future(self.loop)
         task = self.new_task(self.loop, wait_for(fut))
@@ -411,9 +388,8 @@ def test_task_repr_partial_corowrapper(self):
         with set_coroutine_debug(True):
             self.loop.set_debug(True)
 
-            @asyncio.coroutine
-            def func(x, y):
-                yield from asyncio.sleep(0)
+            async def func(x, y):
+                await asyncio.sleep(0)
 
             partial_func = asyncio.coroutine(functools.partial(func, 1))
             task = self.loop.create_task(partial_func(2))
@@ -430,18 +406,16 @@ def func(x, y):
         self.assertRegex(coro_repr, expected)
 
     def test_task_basics(self):
-        @asyncio.coroutine
-        def outer():
-            a = yield from inner1()
-            b = yield from inner2()
+
+        async def outer():
+            a = await inner1()
+            b = await inner2()
             return a+b
 
-        @asyncio.coroutine
-        def inner1():
+        async def inner1():
             return 42
 
-        @asyncio.coroutine
-        def inner2():
+        async def inner2():
             return 1000
 
         t = outer()
@@ -456,9 +430,8 @@ def gen():
 
         loop = self.new_test_loop(gen)
 
-        @asyncio.coroutine
-        def task():
-            yield from asyncio.sleep(10.0, loop=loop)
+        async def task():
+            await asyncio.sleep(10.0, loop=loop)
             return 12
 
         t = self.new_task(loop, task())
@@ -488,9 +461,8 @@ def task():
     def test_cancel_inner_future(self):
         f = self.new_future(self.loop)
 
-        @asyncio.coroutine
-        def task():
-            yield from f
+        async def task():
+            await f
             return 12
 
         t = self.new_task(self.loop, task())
@@ -504,9 +476,8 @@ def task():
     def test_cancel_both_task_and_inner_future(self):
         f = self.new_future(self.loop)
 
-        @asyncio.coroutine
-        def task():
-            yield from f
+        async def task():
+            await f
             return 12
 
         t = self.new_task(self.loop, task())
@@ -526,11 +497,10 @@ def test_cancel_task_catching(self):
         fut1 = self.new_future(self.loop)
         fut2 = self.new_future(self.loop)
 
-        @asyncio.coroutine
-        def task():
-            yield from fut1
+        async def task():
+            await fut1
             try:
-                yield from fut2
+                await fut2
             except asyncio.CancelledError:
                 return 42
 
@@ -551,14 +521,13 @@ def test_cancel_task_ignoring(self):
         fut2 = self.new_future(self.loop)
         fut3 = self.new_future(self.loop)
 
-        @asyncio.coroutine
-        def task():
-            yield from fut1
+        async def task():
+            await fut1
             try:
-                yield from fut2
+                await fut2
             except asyncio.CancelledError:
                 pass
-            res = yield from fut3
+            res = await fut3
             return res
 
         t = self.new_task(self.loop, task())
@@ -581,12 +550,11 @@ def test_cancel_current_task(self):
         loop = asyncio.new_event_loop()
         self.set_event_loop(loop)
 
-        @asyncio.coroutine
-        def task():
+        async def task():
             t.cancel()
             self.assertTrue(t._must_cancel)  # White-box test.
             # The sleep should be cancelled immediately.
-            yield from asyncio.sleep(100, loop=loop)
+            await asyncio.sleep(100, loop=loop)
             return 12
 
         t = self.new_task(loop, task())
@@ -628,14 +596,11 @@ def gen():
         loop = self.new_test_loop(gen)
 
         x = 0
-        waiters = []
 
-        @asyncio.coroutine
-        def task():
+        async def task():
             nonlocal x
             while x < 10:
-                waiters.append(asyncio.sleep(0.1, loop=loop))
-                yield from waiters[-1]
+                await asyncio.sleep(0.1, loop=loop)
                 x += 1
                 if x == 2:
                     loop.stop()
@@ -649,9 +614,6 @@ def task():
         self.assertEqual(x, 2)
         self.assertAlmostEqual(0.3, loop.time())
 
-        # close generators
-        for w in waiters:
-            w.close()
         t.cancel()
         self.assertRaises(asyncio.CancelledError, loop.run_until_complete, t)
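
With native coroutines there is nothing left to track here: await asyncio.sleep(...) creates and consumes the sleep coroutine in a single expression, which is presumably why the waiters list and the explicit close() loop could go. A trivial sketch:

    import asyncio

    async def tick(n):
        for _ in range(n):
            # Created and awaited in one expression; no separate object
            # to collect or close afterwards.
            await asyncio.sleep(0)

    loop = asyncio.get_event_loop()
    loop.run_until_complete(tick(3))
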
 
@@ -704,12 +666,11 @@ def gen():
 
                 foo_running = None
 
-                @asyncio.coroutine
-                def foo():
+                async def foo():
                     nonlocal foo_running
                     foo_running = True
                     try:
-                        yield from asyncio.sleep(0.2, loop=loop)
+                        await asyncio.sleep(0.2, loop=loop)
                     finally:
                         foo_running = False
                     return 'done'
@@ -738,12 +699,11 @@ def gen():
 
         foo_running = None
 
-        @asyncio.coroutine
-        def foo():
+        async def foo():
             nonlocal foo_running
             foo_running = True
             try:
-                yield from asyncio.sleep(0.2, loop=loop)
+                await asyncio.sleep(0.2, loop=loop)
             finally:
                 foo_running = False
             return 'done'
@@ -781,9 +741,8 @@ def gen():
 
         loop = self.new_test_loop(gen)
 
-        @asyncio.coroutine
-        def foo():
-            yield from asyncio.sleep(0.2, loop=loop)
+        async def foo():
+            await asyncio.sleep(0.2, loop=loop)
             return 'done'
 
         asyncio.set_event_loop(loop)
@@ -827,9 +786,8 @@ def gen():
         a = self.new_task(loop, asyncio.sleep(0.1, loop=loop))
         b = self.new_task(loop, asyncio.sleep(0.15, loop=loop))
 
-        @asyncio.coroutine
-        def foo():
-            done, pending = yield from asyncio.wait([b, a], loop=loop)
+        async def foo():
+            done, pending = await asyncio.wait([b, a], loop=loop)
             self.assertEqual(done, set([a, b]))
             self.assertEqual(pending, set())
             return 42
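
As a reminder of the API being exercised: asyncio.wait() returns two sets of futures, the completed ones and the ones still pending. A minimal sketch with made-up delays and results:

    import asyncio

    async def main(loop):
        a = loop.create_task(asyncio.sleep(0.01, 'a'))
        b = loop.create_task(asyncio.sleep(0.02, 'b'))
        done, pending = await asyncio.wait([a, b])
        assert pending == set()
        return {t.result() for t in done}

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(main(loop)))   # {'a', 'b'}
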
@@ -857,9 +815,8 @@ def gen():
         a = self.new_task(loop, asyncio.sleep(0.01, loop=loop))
         b = self.new_task(loop, asyncio.sleep(0.015, loop=loop))
 
-        @asyncio.coroutine
-        def foo():
-            done, pending = yield from asyncio.wait([b, a])
+        async def foo():
+            done, pending = await asyncio.wait([b, a])
             self.assertEqual(done, set([a, b]))
             self.assertEqual(pending, set())
             return 42
@@ -871,6 +828,7 @@ def foo():
         self.assertEqual(res, 42)
 
     def test_wait_duplicate_coroutines(self):
+
         @asyncio.coroutine
         def coro(s):
             return s
@@ -1000,9 +958,8 @@ def gen():
         # first_exception, exception during waiting
         a = self.new_task(loop, asyncio.sleep(10.0, loop=loop))
 
-        @asyncio.coroutine
-        def exc():
-            yield from asyncio.sleep(0.01, loop=loop)
+        async def exc():
+            await asyncio.sleep(0.01, loop=loop)
             raise ZeroDivisionError('err')
 
         b = self.new_task(loop, exc())
@@ -1038,9 +995,8 @@ def sleeper():
 
         b = self.new_task(loop, sleeper())
 
-        @asyncio.coroutine
-        def foo():
-            done, pending = yield from asyncio.wait([b, a], loop=loop)
+        async def foo():
+            done, pending = await asyncio.wait([b, a], loop=loop)
             self.assertEqual(len(done), 2)
             self.assertEqual(pending, set())
             errors = set(f for f in done if f.exception() is not None)
@@ -1068,9 +1024,8 @@ def gen():
         a = self.new_task(loop, asyncio.sleep(0.1, loop=loop))
         b = self.new_task(loop, asyncio.sleep(0.15, loop=loop))
 
-        @asyncio.coroutine
-        def foo():
-            done, pending = yield from asyncio.wait([b, a], timeout=0.11,
+        async def foo():
+            done, pending = await asyncio.wait([b, a], timeout=0.11,
                                                     loop=loop)
             self.assertEqual(done, set([a]))
             self.assertEqual(pending, set([b]))
@@ -1164,17 +1119,16 @@ def gen():
 
         loop = self.new_test_loop(gen)
 
-        a = asyncio.sleep(0.1, 'a', loop=loop)
-        b = asyncio.sleep(0.15, 'b', loop=loop)
+        a = loop.create_task(asyncio.sleep(0.1, 'a', loop=loop))
+        b = loop.create_task(asyncio.sleep(0.15, 'b', loop=loop))
 
-        @asyncio.coroutine
-        def foo():
+        async def foo():
             values = []
             for f in asyncio.as_completed([a, b], timeout=0.12, loop=loop):
                 if values:
                     loop.advance_time(0.02)
                 try:
-                    v = yield from f
+                    v = await f
                     values.append((1, v))
                 except asyncio.TimeoutError as exc:
                     values.append((2, exc))
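
The bare sleep coroutines are now scheduled as tasks before being handed to as_completed(), so the iterator only ever sees already-running futures. A sketch of that pattern with made-up delays and tags:

    import asyncio

    async def main(loop):
        tasks = [loop.create_task(asyncio.sleep(delay, tag))
                 for delay, tag in ((0.01, 'fast'), (0.02, 'slow'))]
        results = []
        for fut in asyncio.as_completed(tasks):
            results.append(await fut)     # futures arrive in finish order
        return results

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(main(loop)))   # ['fast', 'slow']
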
@@ -1202,10 +1156,9 @@ def gen():
 
         a = asyncio.sleep(0.01, 'a', loop=loop)
 
-        @asyncio.coroutine
-        def foo():
+        async def foo():
             for f in asyncio.as_completed([a], timeout=1, loop=loop):
-                v = yield from f
+                v = await f
                 self.assertEqual(v, 'a')
 
         loop.run_until_complete(self.new_task(loop, foo()))
@@ -1578,18 +1531,16 @@ def test_current_task_with_interleaving_tasks(self):
         fut1 = self.new_future(self.loop)
         fut2 = self.new_future(self.loop)
 
-        @asyncio.coroutine
-        def coro1(loop):
+        async def coro1(loop):
             self.assertTrue(Task.current_task(loop=loop) is task1)
-            yield from fut1
+            await fut1
             self.assertTrue(Task.current_task(loop=loop) is task1)
             fut2.set_result(True)
 
-        @asyncio.coroutine
-        def coro2(loop):
+        async def coro2(loop):
             self.assertTrue(Task.current_task(loop=loop) is task2)
             fut1.set_result(True)
-            yield from fut2
+            await fut2
             self.assertTrue(Task.current_task(loop=loop) is task2)
 
         task1 = self.new_task(self.loop, coro1(self.loop))
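
Task.current_task(), the spelling used in this vintage of asyncio, reports the task driving the currently running coroutine, which is what the interleaving assertions above depend on. A minimal sketch:

    import asyncio

    async def whoami():
        # Inside a running coroutine, the ambient task is retrievable.
        return asyncio.Task.current_task()

    loop = asyncio.get_event_loop()
    task = loop.create_task(whoami())
    assert loop.run_until_complete(task) is task
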
@@ -1607,22 +1558,20 @@ def test_yield_future_passes_cancel(self):
         proof = 0
         waiter = self.new_future(self.loop)
 
-        @asyncio.coroutine
-        def inner():
+        async def inner():
             nonlocal proof
             try:
-                yield from waiter
+                await waiter
             except asyncio.CancelledError:
                 proof += 1
                 raise
             else:
                 self.fail('got past sleep() in inner()')
 
-        @asyncio.coroutine
-        def outer():
+        async def outer():
             nonlocal proof
             try:
-                yield from inner()
+                await inner()
             except asyncio.CancelledError:
                 proof += 100  # Expect this path.
             else:
@@ -1641,16 +1590,14 @@ def test_yield_wait_does_not_shield_cancel(self):
         proof = 0
         waiter = self.new_future(self.loop)
 
-        @asyncio.coroutine
-        def inner():
+        async def inner():
             nonlocal proof
-            yield from waiter
+            await waiter
             proof += 1
 
-        @asyncio.coroutine
-        def outer():
+        async def outer():
             nonlocal proof
-            d, p = yield from asyncio.wait([inner()], loop=self.loop)
+            d, p = await asyncio.wait([inner()], loop=self.loop)
             proof += 100
 
         f = asyncio.ensure_future(outer(), loop=self.loop)
@@ -1697,16 +1644,14 @@ def test_shield_effect(self):
         proof = 0
         waiter = self.new_future(self.loop)
 
-        @asyncio.coroutine
-        def inner():
+        async def inner():
             nonlocal proof
-            yield from waiter
+            await waiter
             proof += 1
 
-        @asyncio.coroutine
-        def outer():
+        async def outer():
             nonlocal proof
-            yield from asyncio.shield(inner(), loop=self.loop)
+            await asyncio.shield(inner(), loop=self.loop)
             proof += 100
 
         f = asyncio.ensure_future(outer(), loop=self.loop)
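
asyncio.shield() is what keeps the inner work alive when the outer task is cancelled. A rough sketch of that effect outside the test harness (the scheduling and result value are illustrative):

    import asyncio

    async def protected(fut):
        # Cancelling the task cancels this await, but not `fut` itself.
        return await asyncio.shield(fut)

    loop = asyncio.get_event_loop()
    fut = loop.create_future()
    task = loop.create_task(protected(fut))
    loop.call_soon(task.cancel)
    loop.call_soon(fut.set_result, 'inner result')
    try:
        loop.run_until_complete(task)
    except asyncio.CancelledError:
        pass
    assert fut.result() == 'inner result'   # the shielded future survived
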
@@ -1890,8 +1835,6 @@ def foo():
         self.assertIsInstance(exception, Exception)
         self.assertEqual(exception.args, ("foo", ))
 
-    @unittest.skipUnless(PY34,
-                         'need python 3.4 or later')
     def test_log_destroyed_pending_task(self):
         Task = self.__class__.Task
 
@@ -2661,5 +2604,47 @@ def coro():
         self.assertEqual(result, 11)
 
 
+class CompatibilityTests(test_utils.TestCase):
+    # Tests checking the bridge between old-style (generator-based)
+    # coroutines and async/await syntax
+
+    def setUp(self):
+        super().setUp()
+        self.loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(None)
+
+    def tearDown(self):
+        self.loop.close()
+        self.loop = None
+        super().tearDown()
+
+    def test_yield_from_awaitable(self):
+
+        @asyncio.coroutine
+        def coro():
+            yield from asyncio.sleep(0, loop=self.loop)
+            return 'ok'
+
+        result = self.loop.run_until_complete(coro())
+        self.assertEqual('ok', result)
+
+    def test_await_old_style_coro(self):
+
+        @asyncio.coroutine
+        def coro1():
+            return 'ok1'
+
+        @asyncio.coroutine
+        def coro2():
+            yield from asyncio.sleep(0, loop=self.loop)
+            return 'ok2'
+
+        async def inner():
+            return await asyncio.gather(coro1(), coro2(), loop=self.loop)
+
+        result = self.loop.run_until_complete(inner())
+        self.assertEqual(['ok1', 'ok2'], result)
+
+
 if __name__ == '__main__':
     unittest.main()
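
The new CompatibilityTests pin down the bridge the NEWS entry below promises. Stripped of the test scaffolding, the same round trip looks roughly like this, valid for as long as @asyncio.coroutine is still shipped:

    import asyncio

    @asyncio.coroutine
    def legacy():
        yield from asyncio.sleep(0)
        return 'legacy'

    async def modern():
        # A native coroutine can await a generator-based one, and the
        # decorator lets the reverse direction work with `yield from`.
        return await legacy()

    loop = asyncio.new_event_loop()
    try:
        print(loop.run_until_complete(modern()))   # legacy
    finally:
        loop.close()
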
diff --git a/Lib/test/test_asyncio/test_unix_events.py b/Lib/test/test_asyncio/test_unix_events.py
index 284c73d8daf..6746b34fe2c 100644
--- a/Lib/test/test_asyncio/test_unix_events.py
+++ b/Lib/test/test_asyncio/test_unix_events.py
@@ -77,9 +77,8 @@ def test_add_signal_handler_setup_error(self, m_signal):
     def test_add_signal_handler_coroutine_error(self, m_signal):
         m_signal.NSIG = signal.NSIG
 
-        @asyncio.coroutine
-        def simple_coroutine():
-            yield from []
+        async def simple_coroutine():
+            pass
 
         # callback must not be a coroutine function
         coro_func = simple_coroutine
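
The test body could shrink to a bare pass because what add_signal_handler() rejects is the kind of function, detected up front, not anything the coroutine actually does. A tiny sketch of the classification asyncio relies on (the handler name is made up):

    import asyncio

    async def simple_coroutine():
        pass

    # asyncio classifies the function itself as a coroutine function, so a
    # loop's add_signal_handler() can refuse it before ever calling it.
    print(asyncio.iscoroutinefunction(simple_coroutine))   # True
    print(asyncio.iscoroutinefunction(print))              # False
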
diff --git a/Lib/test/test_asyncio/test_windows_events.py b/Lib/test/test_asyncio/test_windows_events.py
index 5fdf5ff5e44..de6fe120612 100644
--- a/Lib/test/test_asyncio/test_windows_events.py
+++ b/Lib/test/test_asyncio/test_windows_events.py
@@ -56,14 +56,14 @@ def test_pipe(self):
         res = self.loop.run_until_complete(self._test_pipe())
         self.assertEqual(res, 'done')
 
-    def _test_pipe(self):
+    async def _test_pipe(self):
         ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid()
 
         with self.assertRaises(FileNotFoundError):
-            yield from self.loop.create_pipe_connection(
+            await self.loop.create_pipe_connection(
                 asyncio.Protocol, ADDRESS)
 
-        [server] = yield from self.loop.start_serving_pipe(
+        [server] = await self.loop.start_serving_pipe(
             UpperProto, ADDRESS)
         self.assertIsInstance(server, windows_events.PipeServer)
 
@@ -72,7 +72,7 @@ def _test_pipe(self):
             stream_reader = asyncio.StreamReader(loop=self.loop)
             protocol = asyncio.StreamReaderProtocol(stream_reader,
                                                     loop=self.loop)
-            trans, proto = yield from self.loop.create_pipe_connection(
+            trans, proto = await self.loop.create_pipe_connection(
                 lambda: protocol, ADDRESS)
             self.assertIsInstance(trans, asyncio.Transport)
             self.assertEqual(protocol, proto)
@@ -82,14 +82,14 @@ def _test_pipe(self):
             w.write('lower-{}\n'.format(i).encode())
 
         for i, (r, w) in enumerate(clients):
-            response = yield from r.readline()
+            response = await r.readline()
             self.assertEqual(response, 'LOWER-{}\n'.format(i).encode())
             w.close()
 
         server.close()
 
         with self.assertRaises(FileNotFoundError):
-            yield from self.loop.create_pipe_connection(
+            await self.loop.create_pipe_connection(
                 asyncio.Protocol, ADDRESS)
 
         return 'done'
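
The pattern here, an async def helper driven to completion by a synchronous test method, generalizes well beyond the Windows pipe machinery. A stripped-down, hypothetical example of the same shape:

    import asyncio
    import unittest

    class EchoTest(unittest.TestCase):

        def setUp(self):
            self.loop = asyncio.new_event_loop()

        def tearDown(self):
            self.loop.close()

        async def _echo(self, value):
            # Stand-in for the real async work (pipes, transports, ...).
            await asyncio.sleep(0)
            return value

        def test_echo(self):
            res = self.loop.run_until_complete(self._echo('done'))
            self.assertEqual(res, 'done')
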
@@ -97,7 +97,8 @@ def _test_pipe(self):
     def test_connect_pipe_cancel(self):
         exc = OSError()
         exc.winerror = _overlapped.ERROR_PIPE_BUSY
-        with mock.patch.object(_overlapped, 'ConnectPipe', side_effect=exc) as connect:
+        with mock.patch.object(_overlapped, 'ConnectPipe',
+                               side_effect=exc) as connect:
             coro = self.loop._proactor.connect_pipe('pipe_address')
             task = self.loop.create_task(coro)
 
diff --git a/Misc/NEWS.d/next/Library/2017-12-08-11-02-26.bpo-32193.NJe_TQ.rst b/Misc/NEWS.d/next/Library/2017-12-08-11-02-26.bpo-32193.NJe_TQ.rst
new file mode 100644
index 00000000000..6982f1e1ac5
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2017-12-08-11-02-26.bpo-32193.NJe_TQ.rst
@@ -0,0 +1,2 @@
+Convert asyncio to use *async/await* syntax. Old-style ``yield from``
+coroutines are still supported.


