[Python-checkins] bpo-32262: Fix codestyle; use f-strings formatting where necessary. (#4775)

Yury Selivanov webhook-mailer at python.org
Sun Dec 10 18:36:15 EST 2017


https://github.com/python/cpython/commit/6370f345e1d5829e1fba59cd695c8b82c5a8c620
commit: 6370f345e1d5829e1fba59cd695c8b82c5a8c620
branch: master
author: Yury Selivanov <yury at magic.io>
committer: GitHub <noreply at github.com>
date: 2017-12-10T18:36:12-05:00
summary:

bpo-32262: Fix codestyle; use f-strings formatting where necessary. (#4775)
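
A minimal sketch of the two style changes this commit applies, using illustrative names that do not appear in the diff: %-formatting and str.format() calls are replaced with equivalent f-strings, and module-level __all__ lists become tuples.

    transport, value = object(), 42
    # before: %-formatting and str.format()
    old_pct = "unclosed transport %r" % transport
    old_fmt = "got {!r}".format(value)
    # after: equivalent f-strings
    assert old_pct == f"unclosed transport {transport!r}"
    assert old_fmt == f"got {value!r}"
    # __all__ lists become tuples; a bare trailing comma already builds one
    exports = 'BaseEventLoop',          # same as ('BaseEventLoop',)
    assert isinstance(exports, tuple)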

files:
M Lib/asyncio/__init__.py
M Lib/asyncio/base_events.py
M Lib/asyncio/base_futures.py
M Lib/asyncio/base_subprocess.py
M Lib/asyncio/base_tasks.py
M Lib/asyncio/constants.py
M Lib/asyncio/coroutines.py
M Lib/asyncio/events.py
M Lib/asyncio/futures.py
M Lib/asyncio/locks.py
M Lib/asyncio/proactor_events.py
M Lib/asyncio/protocols.py
M Lib/asyncio/queues.py
M Lib/asyncio/selector_events.py
M Lib/asyncio/sslproto.py
M Lib/asyncio/streams.py
M Lib/asyncio/subprocess.py
M Lib/asyncio/tasks.py
M Lib/asyncio/transports.py
M Lib/asyncio/unix_events.py
M Lib/asyncio/windows_events.py
M Lib/asyncio/windows_utils.py
M Lib/test/test_asyncio/test_locks.py
M Lib/test/test_asyncio/test_streams.py

diff --git a/Lib/asyncio/__init__.py b/Lib/asyncio/__init__.py
index 1ee1b2516d4..dd6686de840 100644
--- a/Lib/asyncio/__init__.py
+++ b/Lib/asyncio/__init__.py
@@ -1,5 +1,7 @@
 """The asyncio package, tracking PEP 3156."""
 
+# flake8: noqa
+
 import sys
 
 # This relies on each of the submodules having an __all__ variable.
diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py
index ab92a0b5807..5cc7944f24a 100644
--- a/Lib/asyncio/base_events.py
+++ b/Lib/asyncio/base_events.py
@@ -36,7 +36,7 @@
 from .log import logger
 
 
-__all__ = ['BaseEventLoop']
+__all__ = 'BaseEventLoop',
 
 
 # Minimum number of _scheduled timer handles before cleanup of
@@ -173,8 +173,7 @@ def _ensure_resolved(address, *, family=0, type=socket.SOCK_STREAM, proto=0,
 
 def _run_until_complete_cb(fut):
     exc = fut._exception
-    if (isinstance(exc, BaseException)
-    and not isinstance(exc, Exception)):
+    if isinstance(exc, BaseException) and not isinstance(exc, Exception):
         # Issue #22429: run_forever() already finished, no need to
         # stop it.
         return
@@ -190,7 +189,7 @@ def __init__(self, loop, sockets):
         self._waiters = []
 
     def __repr__(self):
-        return '<%s sockets=%r>' % (self.__class__.__name__, self.sockets)
+        return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
 
     def _attach(self):
         assert self.sockets is not None
@@ -262,9 +261,10 @@ def __init__(self):
         self._asyncgens_shutdown_called = False
 
     def __repr__(self):
-        return ('<%s running=%s closed=%s debug=%s>'
-                % (self.__class__.__name__, self.is_running(),
-                   self.is_closed(), self.get_debug()))
+        return (
+            f'<{self.__class__.__name__} running={self.is_running()} '
+            f'closed={self.is_closed()} debug={self.get_debug()}>'
+        )
 
     def create_future(self):
         """Create a Future object attached to the loop."""
@@ -362,8 +362,8 @@ def _asyncgen_finalizer_hook(self, agen):
     def _asyncgen_firstiter_hook(self, agen):
         if self._asyncgens_shutdown_called:
             warnings.warn(
-                "asynchronous generator {!r} was scheduled after "
-                "loop.shutdown_asyncgens() call".format(agen),
+                f"asynchronous generator {agen!r} was scheduled after "
+                f"loop.shutdown_asyncgens() call",
                 ResourceWarning, source=self)
 
         self._asyncgens.add(agen)
@@ -388,8 +388,8 @@ def _asyncgen_firstiter_hook(self, agen):
         for result, agen in zip(results, closing_agens):
             if isinstance(result, Exception):
                 self.call_exception_handler({
-                    'message': 'an error occurred during closing of '
-                               'asynchronous generator {!r}'.format(agen),
+                    'message': f'an error occurred during closing of '
+                               f'asynchronous generator {agen!r}',
                     'exception': result,
                     'asyncgen': agen
                 })
@@ -495,7 +495,7 @@ def is_closed(self):
 
     def __del__(self):
         if not self.is_closed():
-            warnings.warn("unclosed event loop %r" % self, ResourceWarning,
+            warnings.warn(f"unclosed event loop {self!r}", ResourceWarning,
                           source=self)
             if not self.is_running():
                 self.close()
@@ -573,12 +573,11 @@ def _check_callback(self, callback, method):
         if (coroutines.iscoroutine(callback) or
                 coroutines.iscoroutinefunction(callback)):
             raise TypeError(
-                "coroutines cannot be used with {}()".format(method))
+                f"coroutines cannot be used with {method}()")
         if not callable(callback):
             raise TypeError(
-                'a callable object was expected by {}(), got {!r}'.format(
-                    method, callback))
-
+                f'a callable object was expected by {method}(), '
+                f'got {callback!r}')
 
     def _call_soon(self, callback, args):
         handle = events.Handle(callback, args, self)
@@ -630,15 +629,15 @@ def set_default_executor(self, executor):
         self._default_executor = executor
 
     def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
-        msg = ["%s:%r" % (host, port)]
+        msg = [f"{host}:{port!r}"]
         if family:
-            msg.append('family=%r' % family)
+            msg.append(f'family={family!r}')
         if type:
-            msg.append('type=%r' % type)
+            msg.append(f'type={type!r}')
         if proto:
-            msg.append('proto=%r' % proto)
+            msg.append(f'proto={proto!r}')
         if flags:
-            msg.append('flags=%r' % flags)
+            msg.append(f'flags={flags!r}')
         msg = ', '.join(msg)
         logger.debug('Get address info %s', msg)
 
@@ -646,8 +645,7 @@ def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
         addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
         dt = self.time() - t0
 
-        msg = ('Getting address info %s took %.3f ms: %r'
-               % (msg, dt * 1e3, addrinfo))
+        msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
         if dt >= self.slow_callback_duration:
             logger.info(msg)
         else:
@@ -738,11 +736,12 @@ def getnameinfo(self, sockaddr, flags=0):
                                 sock.bind(laddr)
                                 break
                             except OSError as exc:
-                                exc = OSError(
-                                    exc.errno, 'error while '
-                                    'attempting to bind on address '
-                                    '{!r}: {}'.format(
-                                        laddr, exc.strerror.lower()))
+                                msg = (
+                                    f'error while attempting to bind on '
+                                    f'address {laddr!r}: '
+                                    f'{exc.strerror.lower()}'
+                                )
+                                exc = OSError(exc.errno, msg)
                                 exceptions.append(exc)
                         else:
                             sock.close()
@@ -786,7 +785,7 @@ def getnameinfo(self, sockaddr, flags=0):
                 # Disallowing AF_UNIX in this method, breaks backwards
                 # compatibility.
                 raise ValueError(
-                    'A Stream Socket was expected, got {!r}'.format(sock))
+                    f'A Stream Socket was expected, got {sock!r}')
 
         transport, protocol = await self._create_connection_transport(
             sock, protocol_factory, ssl, server_hostname)
@@ -830,7 +829,7 @@ def getnameinfo(self, sockaddr, flags=0):
         if sock is not None:
             if not _is_dgram_socket(sock):
                 raise ValueError(
-                    'A UDP Socket was expected, got {!r}'.format(sock))
+                    f'A UDP Socket was expected, got {sock!r}')
             if (local_addr or remote_addr or
                     family or proto or flags or
                     reuse_address or reuse_port or allow_broadcast):
@@ -839,11 +838,10 @@ def getnameinfo(self, sockaddr, flags=0):
                             family=family, proto=proto, flags=flags,
                             reuse_address=reuse_address, reuse_port=reuse_port,
                             allow_broadcast=allow_broadcast)
-                problems = ', '.join(
-                    '{}={}'.format(k, v) for k, v in opts.items() if v)
+                problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
                 raise ValueError(
-                    'socket modifier keyword arguments can not be used '
-                    'when sock is specified. ({})'.format(problems))
+                    f'socket modifier keyword arguments can not be used '
+                    f'when sock is specified. ({problems})')
             sock.setblocking(False)
             r_addr = None
         else:
@@ -953,7 +951,7 @@ def getnameinfo(self, sockaddr, flags=0):
                                        type=socket.SOCK_STREAM,
                                        flags=flags, loop=self)
         if not infos:
-            raise OSError('getaddrinfo({!r}) returned empty list'.format(host))
+            raise OSError(f'getaddrinfo({host!r}) returned empty list')
         return infos
 
     async def create_server(self, protocol_factory, host=None, port=None,
@@ -967,8 +965,8 @@ def getnameinfo(self, sockaddr, flags=0):
                             reuse_port=None):
         """Create a TCP server.
 
-        The host parameter can be a string, in that case the TCP server is bound
-        to host and port.
+        The host parameter can be a string, in that case the TCP server is
+        bound to host and port.
 
         The host parameter can also be a sequence of strings and in that case
         the TCP server is bound to all hosts of the sequence. If a host
@@ -1046,8 +1044,7 @@ def getnameinfo(self, sockaddr, flags=0):
             if sock is None:
                 raise ValueError('Neither host/port nor sock were specified')
             if not _is_stream_socket(sock):
-                raise ValueError(
-                    'A Stream Socket was expected, got {!r}'.format(sock))
+                raise ValueError(f'A Stream Socket was expected, got {sock!r}')
             sockets = [sock]
 
         server = Server(self, sockets)
@@ -1070,8 +1067,7 @@ def getnameinfo(self, sockaddr, flags=0):
         returns a (transport, protocol) pair.
         """
         if not _is_stream_socket(sock):
-            raise ValueError(
-                'A Stream Socket was expected, got {!r}'.format(sock))
+            raise ValueError(f'A Stream Socket was expected, got {sock!r}')
 
         transport, protocol = await self._create_connection_transport(
             sock, protocol_factory, ssl, '', server_side=True)
@@ -1117,14 +1113,14 @@ def getnameinfo(self, sockaddr, flags=0):
     def _log_subprocess(self, msg, stdin, stdout, stderr):
         info = [msg]
         if stdin is not None:
-            info.append('stdin=%s' % _format_pipe(stdin))
+            info.append(f'stdin={_format_pipe(stdin)}')
         if stdout is not None and stderr == subprocess.STDOUT:
-            info.append('stdout=stderr=%s' % _format_pipe(stdout))
+            info.append(f'stdout=stderr={_format_pipe(stdout)}')
         else:
             if stdout is not None:
-                info.append('stdout=%s' % _format_pipe(stdout))
+                info.append(f'stdout={_format_pipe(stdout)}')
             if stderr is not None:
-                info.append('stderr=%s' % _format_pipe(stderr))
+                info.append(f'stderr={_format_pipe(stderr)}')
         logger.debug(' '.join(info))
 
     async def subprocess_shell(self, protocol_factory, cmd, *,
@@ -1167,14 +1163,14 @@ def _log_subprocess(self, msg, stdin, stdout, stderr):
         popen_args = (program,) + args
         for arg in popen_args:
             if not isinstance(arg, (str, bytes)):
-                raise TypeError("program arguments must be "
-                                "a bytes or text string, not %s"
-                                % type(arg).__name__)
+                raise TypeError(
+                    f"program arguments must be a bytes or text string, "
+                    f"not {type(arg).__name__}")
         protocol = protocol_factory()
         if self._debug:
             # don't log parameters: they may contain sensitive information
             # (password) and may be too long
-            debug_log = 'execute program %r' % program
+            debug_log = f'execute program {program!r}'
             self._log_subprocess(debug_log, stdin, stdout, stderr)
         transport = await self._make_subprocess_transport(
             protocol, popen_args, False, stdin, stdout, stderr,
@@ -1201,8 +1197,8 @@ def set_exception_handler(self, handler):
         documentation for details about context).
         """
         if handler is not None and not callable(handler):
-            raise TypeError('A callable object or None is expected, '
-                            'got {!r}'.format(handler))
+            raise TypeError(f'A callable object or None is expected, '
+                            f'got {handler!r}')
         self._exception_handler = handler
 
     def default_exception_handler(self, context):
@@ -1230,10 +1226,11 @@ def default_exception_handler(self, context):
         else:
             exc_info = False
 
-        if ('source_traceback' not in context
-        and self._current_handle is not None
-        and self._current_handle._source_traceback):
-            context['handle_traceback'] = self._current_handle._source_traceback
+        if ('source_traceback' not in context and
+                self._current_handle is not None and
+                self._current_handle._source_traceback):
+            context['handle_traceback'] = \
+                self._current_handle._source_traceback
 
         log_lines = [message]
         for key in sorted(context):
@@ -1250,7 +1247,7 @@ def default_exception_handler(self, context):
                 value += tb.rstrip()
             else:
                 value = repr(value)
-            log_lines.append('{}: {}'.format(key, value))
+            log_lines.append(f'{key}: {value}')
 
         logger.error('\n'.join(log_lines), exc_info=exc_info)
 
@@ -1438,18 +1435,19 @@ def _set_coroutine_wrapper(self, enabled):
         if enabled:
             if current_wrapper not in (None, wrapper):
                 warnings.warn(
-                    "loop.set_debug(True): cannot set debug coroutine "
-                    "wrapper; another wrapper is already set %r" %
-                    current_wrapper, RuntimeWarning)
+                    f"loop.set_debug(True): cannot set debug coroutine "
+                    f"wrapper; another wrapper is already set "
+                    f"{current_wrapper!r}",
+                    RuntimeWarning)
             else:
                 set_wrapper(wrapper)
                 self._coroutine_wrapper_set = True
         else:
             if current_wrapper not in (None, wrapper):
                 warnings.warn(
-                    "loop.set_debug(False): cannot unset debug coroutine "
-                    "wrapper; another wrapper was set %r" %
-                    current_wrapper, RuntimeWarning)
+                    f"loop.set_debug(False): cannot unset debug coroutine "
+                    f"wrapper; another wrapper was set {current_wrapper!r}",
+                    RuntimeWarning)
             else:
                 set_wrapper(None)
                 self._coroutine_wrapper_set = False
diff --git a/Lib/asyncio/base_futures.py b/Lib/asyncio/base_futures.py
index 01259a062e6..2ee82c3f057 100644
--- a/Lib/asyncio/base_futures.py
+++ b/Lib/asyncio/base_futures.py
@@ -1,4 +1,4 @@
-__all__ = []
+__all__ = ()
 
 import concurrent.futures._base
 import reprlib
@@ -48,7 +48,7 @@ def format_cb(callback):
         cb = '{}, <{} more>, {}'.format(format_cb(cb[0]),
                                         size - 2,
                                         format_cb(cb[-1]))
-    return 'cb=[%s]' % cb
+    return f'cb=[{cb}]'
 
 
 def _future_repr_info(future):
@@ -57,15 +57,15 @@ def _future_repr_info(future):
     info = [future._state.lower()]
     if future._state == _FINISHED:
         if future._exception is not None:
-            info.append('exception={!r}'.format(future._exception))
+            info.append(f'exception={future._exception!r}')
         else:
             # use reprlib to limit the length of the output, especially
             # for very long strings
             result = reprlib.repr(future._result)
-            info.append('result={}'.format(result))
+            info.append(f'result={result}')
     if future._callbacks:
         info.append(_format_callbacks(future._callbacks))
     if future._source_traceback:
         frame = future._source_traceback[-1]
-        info.append('created at %s:%s' % (frame[0], frame[1]))
+        info.append(f'created at {frame[0]}:{frame[1]}')
     return info
diff --git a/Lib/asyncio/base_subprocess.py b/Lib/asyncio/base_subprocess.py
index 7e5a901845d..7c17066f8bb 100644
--- a/Lib/asyncio/base_subprocess.py
+++ b/Lib/asyncio/base_subprocess.py
@@ -57,9 +57,9 @@ def __repr__(self):
         if self._closed:
             info.append('closed')
         if self._pid is not None:
-            info.append('pid=%s' % self._pid)
+            info.append(f'pid={self._pid}')
         if self._returncode is not None:
-            info.append('returncode=%s' % self._returncode)
+            info.append(f'returncode={self._returncode}')
         elif self._pid is not None:
             info.append('running')
         else:
@@ -67,19 +67,19 @@ def __repr__(self):
 
         stdin = self._pipes.get(0)
         if stdin is not None:
-            info.append('stdin=%s' % stdin.pipe)
+            info.append(f'stdin={stdin.pipe}')
 
         stdout = self._pipes.get(1)
         stderr = self._pipes.get(2)
         if stdout is not None and stderr is stdout:
-            info.append('stdout=stderr=%s' % stdout.pipe)
+            info.append(f'stdout=stderr={stdout.pipe}')
         else:
             if stdout is not None:
-                info.append('stdout=%s' % stdout.pipe)
+                info.append(f'stdout={stdout.pipe}')
             if stderr is not None:
-                info.append('stderr=%s' % stderr.pipe)
+                info.append(f'stderr={stderr.pipe}')
 
-        return '<%s>' % ' '.join(info)
+        return '<{}>'.format(' '.join(info))
 
     def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
         raise NotImplementedError
@@ -103,12 +103,13 @@ def close(self):
                 continue
             proto.pipe.close()
 
-        if (self._proc is not None
-        # the child process finished?
-        and self._returncode is None
-        # the child process finished but the transport was not notified yet?
-        and self._proc.poll() is None
-        ):
+        if (self._proc is not None and
+                # has the child process finished?
+                self._returncode is None and
+                # the child process has finished, but the
+                # transport hasn't been notified yet?
+                self._proc.poll() is None):
+
             if self._loop.get_debug():
                 logger.warning('Close running child process: kill %r', self)
 
@@ -121,7 +122,7 @@ def close(self):
 
     def __del__(self):
         if not self._closed:
-            warnings.warn("unclosed transport %r" % self, ResourceWarning,
+            warnings.warn(f"unclosed transport {self!r}", ResourceWarning,
                           source=self)
             self.close()
 
@@ -206,8 +207,7 @@ def _process_exited(self, returncode):
         assert returncode is not None, returncode
         assert self._returncode is None, self._returncode
         if self._loop.get_debug():
-            logger.info('%r exited with return code %r',
-                        self, returncode)
+            logger.info('%r exited with return code %r', self, returncode)
         self._returncode = returncode
         if self._proc.returncode is None:
             # asyncio uses a child watcher: copy the status into the Popen
@@ -263,8 +263,7 @@ def connection_made(self, transport):
         self.pipe = transport
 
     def __repr__(self):
-        return ('<%s fd=%s pipe=%r>'
-                % (self.__class__.__name__, self.fd, self.pipe))
+        return f'<{self.__class__.__name__} fd={self.fd} pipe={self.pipe!r}>'
 
     def connection_lost(self, exc):
         self.disconnected = True
diff --git a/Lib/asyncio/base_tasks.py b/Lib/asyncio/base_tasks.py
index 5f34434c576..3ce51f6a986 100644
--- a/Lib/asyncio/base_tasks.py
+++ b/Lib/asyncio/base_tasks.py
@@ -13,10 +13,10 @@ def _task_repr_info(task):
         info[0] = 'cancelling'
 
     coro = coroutines._format_coroutine(task._coro)
-    info.insert(1, 'coro=<%s>' % coro)
+    info.insert(1, f'coro=<{coro}>')
 
     if task._fut_waiter is not None:
-        info.insert(2, 'wait_for=%r' % task._fut_waiter)
+        info.insert(2, f'wait_for={task._fut_waiter!r}')
     return info
 
 
@@ -61,15 +61,15 @@ def _task_print_stack(task, limit, file):
             linecache.checkcache(filename)
         line = linecache.getline(filename, lineno, f.f_globals)
         extracted_list.append((filename, lineno, name, line))
+
     exc = task._exception
     if not extracted_list:
-        print('No stack for %r' % task, file=file)
+        print(f'No stack for {task!r}', file=file)
     elif exc is not None:
-        print('Traceback for %r (most recent call last):' % task,
-              file=file)
+        print(f'Traceback for {task!r} (most recent call last):', file=file)
     else:
-        print('Stack for %r (most recent call last):' % task,
-              file=file)
+        print(f'Stack for {task!r} (most recent call last):', file=file)
+
     traceback.print_list(extracted_list, file=file)
     if exc is not None:
         for line in traceback.format_exception_only(exc.__class__, exc):
diff --git a/Lib/asyncio/constants.py b/Lib/asyncio/constants.py
index 60ca0da75f8..dfe97f49859 100644
--- a/Lib/asyncio/constants.py
+++ b/Lib/asyncio/constants.py
@@ -1,5 +1,3 @@
-"""Constants."""
-
 # After the connection is lost, log warnings after this many write()s.
 LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5
 
diff --git a/Lib/asyncio/coroutines.py b/Lib/asyncio/coroutines.py
index 7d2ca05d2a0..d7e6b4c1ccb 100644
--- a/Lib/asyncio/coroutines.py
+++ b/Lib/asyncio/coroutines.py
@@ -1,9 +1,7 @@
-__all__ = ['coroutine',
-           'iscoroutinefunction', 'iscoroutine']
+__all__ = 'coroutine', 'iscoroutinefunction', 'iscoroutine'
 
 import functools
 import inspect
-import opcode
 import os
 import sys
 import traceback
@@ -27,9 +25,8 @@ def _is_debug_mode():
     # before you define your coroutines.  A downside of using this feature
     # is that tracebacks show entries for the CoroWrapper.__next__ method
     # when _DEBUG is true.
-    return (sys.flags.dev_mode
-            or (not sys.flags.ignore_environment
-                and bool(os.environ.get('PYTHONASYNCIODEBUG'))))
+    return sys.flags.dev_mode or (not sys.flags.ignore_environment and
+                                  bool(os.environ.get('PYTHONASYNCIODEBUG')))
 
 
 _DEBUG = _is_debug_mode()
@@ -58,8 +55,9 @@ def __repr__(self):
         coro_repr = _format_coroutine(self)
         if self._source_traceback:
             frame = self._source_traceback[-1]
-            coro_repr += ', created at %s:%s' % (frame[0], frame[1])
-        return '<%s %s>' % (self.__class__.__name__, coro_repr)
+            coro_repr += f', created at {frame[0]}:{frame[1]}'
+
+        return f'<{self.__class__.__name__} {coro_repr}>'
 
     def __iter__(self):
         return self
@@ -92,8 +90,8 @@ def __await__(self):
         cr_await = getattr(self.gen, 'cr_await', None)
         if cr_await is not None:
             raise RuntimeError(
-                "Cannot await on coroutine {!r} while it's "
-                "awaiting for {!r}".format(self.gen, cr_await))
+                f"Cannot await on coroutine {self.gen!r} while it's "
+                f"awaiting for {cr_await!r}")
         return self
 
     @property
@@ -123,7 +121,7 @@ def __del__(self):
         if frame is None:
             frame = getattr(gen, 'cr_frame', None)
         if frame is not None and frame.f_lasti == -1:
-            msg = '%r was never yielded from' % self
+            msg = f'{self!r} was never yielded from'
             tb = getattr(self, '_source_traceback', ())
             if tb:
                 tb = ''.join(traceback.format_list(tb))
@@ -154,11 +152,10 @@ def coroutine(func):
         def coro(*args, **kw):
             res = func(*args, **kw)
             if (base_futures.isfuture(res) or inspect.isgenerator(res) or
-                isinstance(res, CoroWrapper)):
+                    isinstance(res, CoroWrapper)):
                 res = yield from res
             else:
-                # If 'func' returns an Awaitable (new in 3.5) we
-                # want to run it.
+                # If 'res' is an awaitable, run it.
                 try:
                     await_meth = res.__await__
                 except AttributeError:
@@ -219,7 +216,7 @@ def _format_coroutine(coro):
         coro_name = getattr(
             coro, '__qualname__',
             getattr(coro, '__name__', type(coro).__name__))
-        coro_name = '{}()'.format(coro_name)
+        coro_name = f'{coro_name}()'
 
         running = False
         try:
@@ -231,7 +228,7 @@ def _format_coroutine(coro):
                 pass
 
         if running:
-            return '{} running'.format(coro_name)
+            return f'{coro_name} running'
         else:
             return coro_name
 
@@ -240,7 +237,7 @@ def _format_coroutine(coro):
         func = coro.func
         coro_name = coro.__qualname__
         if coro_name is not None:
-            coro_name = '{}()'.format(coro_name)
+            coro_name = f'{coro_name}()'
     else:
         func = coro
 
@@ -266,18 +263,14 @@ def _format_coroutine(coro):
         if source is not None:
             filename, lineno = source
         if coro_frame is None:
-            coro_repr = ('%s done, defined at %s:%s'
-                         % (coro_name, filename, lineno))
+            coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'
         else:
-            coro_repr = ('%s running, defined at %s:%s'
-                         % (coro_name, filename, lineno))
+            coro_repr = f'{coro_name} running, defined at {filename}:{lineno}'
     elif coro_frame is not None:
         lineno = coro_frame.f_lineno
-        coro_repr = ('%s running at %s:%s'
-                     % (coro_name, filename, lineno))
+        coro_repr = f'{coro_name} running at {filename}:{lineno}'
     else:
         lineno = coro_code.co_firstlineno
-        coro_repr = ('%s done, defined at %s:%s'
-                     % (coro_name, filename, lineno))
+        coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'
 
     return coro_repr
diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py
index 2cd6035973d..7db1ded8e8b 100644
--- a/Lib/asyncio/events.py
+++ b/Lib/asyncio/events.py
@@ -1,13 +1,14 @@
 """Event loop and event loop policy."""
 
-__all__ = ['AbstractEventLoopPolicy',
-           'AbstractEventLoop', 'AbstractServer',
-           'Handle', 'TimerHandle',
-           'get_event_loop_policy', 'set_event_loop_policy',
-           'get_event_loop', 'set_event_loop', 'new_event_loop',
-           'get_child_watcher', 'set_child_watcher',
-           '_set_running_loop', '_get_running_loop',
-           ]
+__all__ = (
+    'AbstractEventLoopPolicy',
+    'AbstractEventLoop', 'AbstractServer',
+    'Handle', 'TimerHandle',
+    'get_event_loop_policy', 'set_event_loop_policy',
+    'get_event_loop', 'set_event_loop', 'new_event_loop',
+    'get_child_watcher', 'set_child_watcher',
+    '_set_running_loop', '_get_running_loop',
+)
 
 import functools
 import inspect
@@ -44,9 +45,8 @@ def _format_args_and_kwargs(args, kwargs):
     if args:
         items.extend(reprlib.repr(arg) for arg in args)
     if kwargs:
-        items.extend('{}={}'.format(k, reprlib.repr(v))
-                     for k, v in kwargs.items())
-    return '(' + ', '.join(items) + ')'
+        items.extend(f'{k}={reprlib.repr(v)}' for k, v in kwargs.items())
+    return '({})'.format(', '.join(items))
 
 
 def _format_callback(func, args, kwargs, suffix=''):
@@ -66,11 +66,12 @@ def _format_callback(func, args, kwargs, suffix=''):
         func_repr += suffix
     return func_repr
 
+
 def _format_callback_source(func, args):
     func_repr = _format_callback(func, args, None)
     source = _get_function_source(func)
     if source:
-        func_repr += ' at %s:%s' % source
+        func_repr += f' at {source[0]}:{source[1]}'
     return func_repr
 
 
@@ -116,14 +117,14 @@ def _repr_info(self):
             info.append(_format_callback_source(self._callback, self._args))
         if self._source_traceback:
             frame = self._source_traceback[-1]
-            info.append('created at %s:%s' % (frame[0], frame[1]))
+            info.append(f'created at {frame[0]}:{frame[1]}')
         return info
 
     def __repr__(self):
         if self._repr is not None:
             return self._repr
         info = self._repr_info()
-        return '<%s>' % ' '.join(info)
+        return '<{}>'.format(' '.join(info))
 
     def cancel(self):
         if not self._cancelled:
@@ -144,7 +145,7 @@ def _run(self):
             self._callback(*self._args)
         except Exception as exc:
             cb = _format_callback_source(self._callback, self._args)
-            msg = 'Exception in callback {}'.format(cb)
+            msg = f'Exception in callback {cb}'
             context = {
                 'message': msg,
                 'exception': exc,
@@ -172,7 +173,7 @@ def __init__(self, when, callback, args, loop):
     def _repr_info(self):
         info = super()._repr_info()
         pos = 2 if self._cancelled else 1
-        info.insert(pos, 'when=%s' % self._when)
+        info.insert(pos, f'when={self._when}')
         return info
 
     def __hash__(self):
@@ -334,8 +335,8 @@ def set_default_executor(self, executor):
 
         If host is an empty string or None all interfaces are assumed
         and a list of multiple sockets will be returned (most likely
-        one for IPv4 and another one for IPv6). The host parameter can also be a
-        sequence (e.g. list) of hosts to bind to.
+        one for IPv4 and another one for IPv6). The host parameter can also be
+        a sequence (e.g. list) of hosts to bind to.
 
         family can be set to either AF_INET or AF_INET6 to force the
         socket to use IPv4 or IPv6. If not set it will be determined
@@ -602,12 +603,14 @@ def get_event_loop(self):
         This may be None or an instance of EventLoop.
         """
         if (self._local._loop is None and
-            not self._local._set_called and
-            isinstance(threading.current_thread(), threading._MainThread)):
+                not self._local._set_called and
+                isinstance(threading.current_thread(), threading._MainThread)):
             self.set_event_loop(self.new_event_loop())
+
         if self._local._loop is None:
             raise RuntimeError('There is no current event loop in thread %r.'
                                % threading.current_thread().name)
+
         return self._local._loop
 
     def set_event_loop(self, loop):
diff --git a/Lib/asyncio/futures.py b/Lib/asyncio/futures.py
index 7b6204a626f..b805f99896d 100644
--- a/Lib/asyncio/futures.py
+++ b/Lib/asyncio/futures.py
@@ -1,12 +1,13 @@
 """A Future class similar to the one in PEP 3148."""
 
-__all__ = ['CancelledError', 'TimeoutError', 'InvalidStateError',
-           'Future', 'wrap_future', 'isfuture']
+__all__ = (
+    'CancelledError', 'TimeoutError', 'InvalidStateError',
+    'Future', 'wrap_future', 'isfuture',
+)
 
 import concurrent.futures
 import logging
 import sys
-import traceback
 
 from . import base_futures
 from . import events
@@ -82,7 +83,8 @@ def __init__(self, *, loop=None):
     _repr_info = base_futures._future_repr_info
 
     def __repr__(self):
-        return '<%s %s>' % (self.__class__.__name__, ' '.join(self._repr_info()))
+        return '<{} {}>'.format(self.__class__.__name__,
+                                ' '.join(self._repr_info()))
 
     def __del__(self):
         if not self._log_traceback:
@@ -91,8 +93,8 @@ def __del__(self):
             return
         exc = self._exception
         context = {
-            'message': ('%s exception was never retrieved'
-                        % self.__class__.__name__),
+            'message':
+                f'{self.__class__.__name__} exception was never retrieved',
             'exception': exc,
             'future': self,
         }
@@ -237,7 +239,7 @@ def __iter__(self):
         assert self.done(), "yield from wasn't used with future"
         return self.result()  # May raise too.
 
-    __await__ = __iter__ # make compatible with 'await' expression
+    __await__ = __iter__  # make compatible with 'await' expression
 
 
 # Needed for testing purposes.
@@ -330,7 +332,7 @@ def wrap_future(future, *, loop=None):
     if isfuture(future):
         return future
     assert isinstance(future, concurrent.futures.Future), \
-        'concurrent.futures.Future is expected, got {!r}'.format(future)
+        f'concurrent.futures.Future is expected, got {future!r}'
     if loop is None:
         loop = events.get_event_loop()
     new_future = loop.create_future()
diff --git a/Lib/asyncio/locks.py b/Lib/asyncio/locks.py
index 57eb69e7cf0..54f62585983 100644
--- a/Lib/asyncio/locks.py
+++ b/Lib/asyncio/locks.py
@@ -1,6 +1,6 @@
 """Synchronization primitives."""
 
-__all__ = ['Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore']
+__all__ = ('Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore')
 
 import collections
 import warnings
@@ -157,8 +157,8 @@ def __repr__(self):
         res = super().__repr__()
         extra = 'locked' if self._locked else 'unlocked'
         if self._waiters:
-            extra = '{},waiters:{}'.format(extra, len(self._waiters))
-        return '<{} [{}]>'.format(res[1:-1], extra)
+            extra = f'{extra}, waiters:{len(self._waiters)}'
+        return f'<{res[1:-1]} [{extra}]>'
 
     def locked(self):
         """Return True if lock is acquired."""
@@ -233,8 +233,8 @@ def __repr__(self):
         res = super().__repr__()
         extra = 'set' if self._value else 'unset'
         if self._waiters:
-            extra = '{},waiters:{}'.format(extra, len(self._waiters))
-        return '<{} [{}]>'.format(res[1:-1], extra)
+            extra = f'{extra}, waiters:{len(self._waiters)}'
+        return f'<{res[1:-1]} [{extra}]>'
 
     def is_set(self):
         """Return True if and only if the internal flag is true."""
@@ -310,8 +310,8 @@ def __repr__(self):
         res = super().__repr__()
         extra = 'locked' if self.locked() else 'unlocked'
         if self._waiters:
-            extra = '{},waiters:{}'.format(extra, len(self._waiters))
-        return '<{} [{}]>'.format(res[1:-1], extra)
+            extra = f'{extra}, waiters:{len(self._waiters)}'
+        return f'<{res[1:-1]} [{extra}]>'
 
     async def wait(self):
         """Wait until notified.
@@ -419,11 +419,10 @@ def __init__(self, value=1, *, loop=None):
 
     def __repr__(self):
         res = super().__repr__()
-        extra = 'locked' if self.locked() else 'unlocked,value:{}'.format(
-            self._value)
+        extra = 'locked' if self.locked() else f'unlocked, value:{self._value}'
         if self._waiters:
-            extra = '{},waiters:{}'.format(extra, len(self._waiters))
-        return '<{} [{}]>'.format(res[1:-1], extra)
+            extra = f'{extra}, waiters:{len(self._waiters)}'
+        return f'<{res[1:-1]} [{extra}]>'
 
     def _wake_up_next(self):
         while self._waiters:
diff --git a/Lib/asyncio/proactor_events.py b/Lib/asyncio/proactor_events.py
index d7aa5ff3017..3d48a2c05a2 100644
--- a/Lib/asyncio/proactor_events.py
+++ b/Lib/asyncio/proactor_events.py
@@ -4,7 +4,7 @@
 proactor is only implemented on Windows with IOCP.
 """
 
-__all__ = ['BaseProactorEventLoop']
+__all__ = 'BaseProactorEventLoop',
 
 import socket
 import warnings
@@ -50,17 +50,16 @@ def __repr__(self):
         elif self._closing:
             info.append('closing')
         if self._sock is not None:
-            info.append('fd=%s' % self._sock.fileno())
+            info.append(f'fd={self._sock.fileno()}')
         if self._read_fut is not None:
-            info.append('read=%s' % self._read_fut)
+            info.append(f'read={self._read_fut!r}')
         if self._write_fut is not None:
-            info.append("write=%r" % self._write_fut)
+            info.append(f'write={self._write_fut!r}')
         if self._buffer:
-            bufsize = len(self._buffer)
-            info.append('write_bufsize=%s' % bufsize)
+            info.append(f'write_bufsize={len(self._buffer)}')
         if self._eof_written:
             info.append('EOF written')
-        return '<%s>' % ' '.join(info)
+        return '<{}>'.format(' '.join(info))
 
     def _set_extra(self, sock):
         self._extra['pipe'] = sock
@@ -87,7 +86,7 @@ def close(self):
 
     def __del__(self):
         if self._sock is not None:
-            warnings.warn("unclosed transport %r" % self, ResourceWarning,
+            warnings.warn(f"unclosed transport {self!r}", ResourceWarning,
                           source=self)
             self.close()
 
@@ -227,9 +226,9 @@ class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
 
     def write(self, data):
         if not isinstance(data, (bytes, bytearray, memoryview)):
-            msg = ("data argument must be a bytes-like object, not '%s'" %
-                   type(data).__name__)
-            raise TypeError(msg)
+            raise TypeError(
+                f"data argument must be a bytes-like object, "
+                f"not {type(data).__name__}")
         if self._eof_written:
             raise RuntimeError('write_eof() already called')
 
@@ -347,12 +346,14 @@ class _ProactorSocketTransport(_ProactorReadPipeTransport,
 
     def _set_extra(self, sock):
         self._extra['socket'] = sock
+
         try:
             self._extra['sockname'] = sock.getsockname()
         except (socket.error, AttributeError):
             if self._loop.get_debug():
-                logger.warning("getsockname() failed on %r",
-                             sock, exc_info=True)
+                logger.warning(
+                    "getsockname() failed on %r", sock, exc_info=True)
+
         if 'peername' not in self._extra:
             try:
                 self._extra['peername'] = sock.getpeername()
diff --git a/Lib/asyncio/protocols.py b/Lib/asyncio/protocols.py
index 80fcac9a82d..57987ae4463 100644
--- a/Lib/asyncio/protocols.py
+++ b/Lib/asyncio/protocols.py
@@ -1,7 +1,9 @@
-"""Abstract Protocol class."""
+"""Abstract Protocol base classes."""
 
-__all__ = ['BaseProtocol', 'Protocol', 'DatagramProtocol',
-           'SubprocessProtocol']
+__all__ = (
+    'BaseProtocol', 'Protocol', 'DatagramProtocol',
+    'SubprocessProtocol',
+)
 
 
 class BaseProtocol:
diff --git a/Lib/asyncio/queues.py b/Lib/asyncio/queues.py
index 10e694f1393..512ea6037f8 100644
--- a/Lib/asyncio/queues.py
+++ b/Lib/asyncio/queues.py
@@ -1,6 +1,4 @@
-"""Queues"""
-
-__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']
+__all__ = ('Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty')
 
 import collections
 import heapq
@@ -10,16 +8,12 @@
 
 
 class QueueEmpty(Exception):
-    """Exception raised when Queue.get_nowait() is called on a Queue object
-    which is empty.
-    """
+    """Raised when Queue.get_nowait() is called on an empty Queue."""
     pass
 
 
 class QueueFull(Exception):
-    """Exception raised when the Queue.put_nowait() method is called on a Queue
-    object which is full.
-    """
+    """Raised when the Queue.put_nowait() method is called on a full Queue."""
     pass
 
 
@@ -73,22 +67,21 @@ def _wakeup_next(self, waiters):
                 break
 
     def __repr__(self):
-        return '<{} at {:#x} {}>'.format(
-            type(self).__name__, id(self), self._format())
+        return f'<{type(self).__name__} at {id(self):#x} {self._format()}>'
 
     def __str__(self):
-        return '<{} {}>'.format(type(self).__name__, self._format())
+        return f'<{type(self).__name__} {self._format()}>'
 
     def _format(self):
-        result = 'maxsize={!r}'.format(self._maxsize)
+        result = f'maxsize={self._maxsize!r}'
         if getattr(self, '_queue', None):
-            result += ' _queue={!r}'.format(list(self._queue))
+            result += f' _queue={list(self._queue)!r}'
         if self._getters:
-            result += ' _getters[{}]'.format(len(self._getters))
+            result += f' _getters[{len(self._getters)}]'
         if self._putters:
-            result += ' _putters[{}]'.format(len(self._putters))
+            result += f' _putters[{len(self._putters)}]'
         if self._unfinished_tasks:
-            result += ' tasks={}'.format(self._unfinished_tasks)
+            result += f' tasks={self._unfinished_tasks}'
         return result
 
     def qsize(self):
diff --git a/Lib/asyncio/selector_events.py b/Lib/asyncio/selector_events.py
index c30fde7f4da..2467e23e854 100644
--- a/Lib/asyncio/selector_events.py
+++ b/Lib/asyncio/selector_events.py
@@ -4,7 +4,7 @@
 also includes support for signal handling, see the unix_events sub-module.
 """
 
-__all__ = ['BaseSelectorEventLoop']
+__all__ = 'BaseSelectorEventLoop',
 
 import collections
 import errno
@@ -184,8 +184,8 @@ def _accept_connection(self, protocol_factory, sock,
                     raise  # The event loop will catch, log and ignore it.
             else:
                 extra = {'peername': addr}
-                accept = self._accept_connection2(protocol_factory, conn, extra,
-                                                  sslcontext, server)
+                accept = self._accept_connection2(
+                    protocol_factory, conn, extra, sslcontext, server)
                 self.create_task(accept)
 
     async def _accept_connection2(self, protocol_factory, conn, extra,
@@ -214,8 +214,8 @@ def _accept_connection(self, protocol_factory, sock,
         except Exception as exc:
             if self._debug:
                 context = {
-                    'message': ('Error on transport creation '
-                                'for incoming connection'),
+                    'message':
+                        'Error on transport creation for incoming connection',
                     'exception': exc,
                 }
                 if protocol is not None:
@@ -231,8 +231,7 @@ def _ensure_fd_no_transport(self, fd):
                 fileno = int(fileno.fileno())
             except (AttributeError, TypeError, ValueError):
                 # This code matches selectors._fileobj_to_fd function.
-                raise ValueError("Invalid file object: "
-                                 "{!r}".format(fd)) from None
+                raise ValueError(f"Invalid file object: {fd!r}") from None
         try:
             transport = self._transports[fileno]
         except KeyError:
@@ -240,8 +239,8 @@ def _ensure_fd_no_transport(self, fd):
         else:
             if not transport.is_closing():
                 raise RuntimeError(
-                    'File descriptor {!r} is used by transport {!r}'.format(
-                        fd, transport))
+                    f'File descriptor {fd!r} is used by transport '
+                    f'{transport!r}')
 
     def _add_reader(self, fd, callback, *args):
         self._check_closed()
@@ -389,10 +388,11 @@ def sock_recv_into(self, sock, buf):
 
     def _sock_recv_into(self, fut, registered_fd, sock, buf):
         # _sock_recv_into() can add itself as an I/O callback if the operation
-        # can't be done immediately. Don't use it directly, call sock_recv_into().
+        # can't be done immediately. Don't use it directly, call
+        # sock_recv_into().
         if registered_fd is not None:
             # Remove the callback early.  It should be rare that the
-            # selector says the fd is ready but the call still returns
+            # selector says the FD is ready but the call still returns
             # EAGAIN, and I am willing to take a hit in that case in
             # order to simplify the common case.
             self.remove_reader(registered_fd)
@@ -497,7 +497,7 @@ def _sock_connect_cb(self, fut, sock, address):
             err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
             if err != 0:
                 # Jump to any except clause below.
-                raise OSError(err, 'Connect call failed %s' % (address,))
+                raise OSError(err, f'Connect call failed {address}')
         except (BlockingIOError, InterruptedError):
             # socket is still registered, the callback will be retried later
             pass
@@ -596,7 +596,7 @@ def __repr__(self):
             info.append('closed')
         elif self._closing:
             info.append('closing')
-        info.append('fd=%s' % self._sock_fd)
+        info.append(f'fd={self._sock_fd}')
         # test if the transport was closed
         if self._loop is not None and not self._loop.is_closed():
             polling = _test_selector_event(self._loop._selector,
@@ -615,8 +615,8 @@ def __repr__(self):
                 state = 'idle'
 
             bufsize = self.get_write_buffer_size()
-            info.append('write=<%s, bufsize=%s>' % (state, bufsize))
-        return '<%s>' % ' '.join(info)
+            info.append(f'write=<{state}, bufsize={bufsize}>')
+        return '<{}>'.format(' '.join(info))
 
     def abort(self):
         self._force_close(None)
@@ -642,7 +642,7 @@ def close(self):
 
     def __del__(self):
         if self._sock is not None:
-            warnings.warn("unclosed transport %r" % self, ResourceWarning,
+            warnings.warn(f"unclosed transport {self!r}", ResourceWarning,
                           source=self)
             self._sock.close()
 
@@ -758,8 +758,8 @@ def _read_ready(self):
 
     def write(self, data):
         if not isinstance(data, (bytes, bytearray, memoryview)):
-            raise TypeError('data argument must be a bytes-like object, '
-                            'not %r' % type(data).__name__)
+            raise TypeError(f'data argument must be a bytes-like object, '
+                            f'not {type(data).__name__!r}')
         if self._eof:
             raise RuntimeError('Cannot call write() after write_eof()')
         if not data:
@@ -862,14 +862,14 @@ def _read_ready(self):
 
     def sendto(self, data, addr=None):
         if not isinstance(data, (bytes, bytearray, memoryview)):
-            raise TypeError('data argument must be a bytes-like object, '
-                            'not %r' % type(data).__name__)
+            raise TypeError(f'data argument must be a bytes-like object, '
+                            f'not {type(data).__name__!r}')
         if not data:
             return
 
         if self._address and addr not in (None, self._address):
-            raise ValueError('Invalid address: must be None or %s' %
-                             (self._address,))
+            raise ValueError(
+                f'Invalid address: must be None or {self._address}')
 
         if self._conn_lost and self._address:
             if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
@@ -891,8 +891,8 @@ def sendto(self, data, addr=None):
                 self._protocol.error_received(exc)
                 return
             except Exception as exc:
-                self._fatal_error(exc,
-                                  'Fatal write error on datagram transport')
+                self._fatal_error(
+                    exc, 'Fatal write error on datagram transport')
                 return
 
         # Ensure that what we buffer is immutable.
@@ -914,8 +914,8 @@ def _sendto_ready(self):
                 self._protocol.error_received(exc)
                 return
             except Exception as exc:
-                self._fatal_error(exc,
-                                  'Fatal write error on datagram transport')
+                self._fatal_error(
+                    exc, 'Fatal write error on datagram transport')
                 return
 
         self._maybe_resume_protocol()  # May append to buffer.
diff --git a/Lib/asyncio/sslproto.py b/Lib/asyncio/sslproto.py
index c231eb58ee5..0c8f01ad8f1 100644
--- a/Lib/asyncio/sslproto.py
+++ b/Lib/asyncio/sslproto.py
@@ -313,7 +313,7 @@ def close(self):
 
     def __del__(self):
         if not self._closed:
-            warnings.warn("unclosed transport %r" % self, ResourceWarning,
+            warnings.warn(f"unclosed transport {self!r}", ResourceWarning,
                           source=self)
             self.close()
 
@@ -365,8 +365,8 @@ def write(self, data):
         to be sent out asynchronously.
         """
         if not isinstance(data, (bytes, bytearray, memoryview)):
-            raise TypeError("data: expecting a bytes-like instance, got {!r}"
-                                .format(type(data).__name__))
+            raise TypeError(f"data: expecting a bytes-like instance, "
+                            f"got {type(data).__name__}")
         if not data:
             return
         self._ssl_protocol._write_appdata(data)
@@ -399,7 +399,8 @@ def __init__(self, loop, app_protocol, sslcontext, waiter,
             raise RuntimeError('stdlib ssl module not available')
 
         if not sslcontext:
-            sslcontext = _create_transport_context(server_side, server_hostname)
+            sslcontext = _create_transport_context(
+                server_side, server_hostname)
 
         self._server_side = server_side
         if server_hostname and not server_side:
@@ -567,8 +568,8 @@ def _on_handshake_complete(self, handshake_exc):
             if not hasattr(self._sslcontext, 'check_hostname'):
                 # Verify hostname if requested, Python 3.4+ uses check_hostname
                 # and checks the hostname in do_handshake()
-                if (self._server_hostname
-                and self._sslcontext.verify_mode != ssl.CERT_NONE):
+                if (self._server_hostname and
+                        self._sslcontext.verify_mode != ssl.CERT_NONE):
                     ssl.match_hostname(peercert, self._server_hostname)
         except BaseException as exc:
             if self._loop.get_debug():
diff --git a/Lib/asyncio/streams.py b/Lib/asyncio/streams.py
index baa9ec94439..eef2b895f1e 100644
--- a/Lib/asyncio/streams.py
+++ b/Lib/asyncio/streams.py
@@ -1,15 +1,13 @@
-"""Stream-related things."""
-
-__all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol',
-           'open_connection', 'start_server',
-           'IncompleteReadError',
-           'LimitOverrunError',
-           ]
+__all__ = (
+    'StreamReader', 'StreamWriter', 'StreamReaderProtocol',
+    'open_connection', 'start_server',
+    'IncompleteReadError', 'LimitOverrunError',
+)
 
 import socket
 
 if hasattr(socket, 'AF_UNIX'):
-    __all__.extend(['open_unix_connection', 'start_unix_server'])
+    __all__ += ('open_unix_connection', 'start_unix_server')
 
 from . import coroutines
 from . import events
@@ -29,8 +27,8 @@ class IncompleteReadError(EOFError):
     - expected: total number of expected bytes (or None if unknown)
     """
     def __init__(self, partial, expected):
-        super().__init__("%d bytes read on a total of %r expected bytes"
-                         % (len(partial), expected))
+        super().__init__(f'{len(partial)} bytes read on a total of '
+                         f'{expected!r} expected bytes')
         self.partial = partial
         self.expected = expected
 
@@ -281,10 +279,10 @@ def __init__(self, transport, protocol, reader, loop):
         self._loop = loop
 
     def __repr__(self):
-        info = [self.__class__.__name__, 'transport=%r' % self._transport]
+        info = [self.__class__.__name__, f'transport={self._transport!r}']
         if self._reader is not None:
-            info.append('reader=%r' % self._reader)
-        return '<%s>' % ' '.join(info)
+            info.append(f'reader={self._reader!r}')
+        return '<{}>'.format(' '.join(info))
 
     @property
     def transport(self):
@@ -356,20 +354,20 @@ def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
     def __repr__(self):
         info = ['StreamReader']
         if self._buffer:
-            info.append('%d bytes' % len(self._buffer))
+            info.append(f'{len(self._buffer)} bytes')
         if self._eof:
             info.append('eof')
         if self._limit != _DEFAULT_LIMIT:
-            info.append('l=%d' % self._limit)
+            info.append(f'limit={self._limit}')
         if self._waiter:
-            info.append('w=%r' % self._waiter)
+            info.append(f'waiter={self._waiter!r}')
         if self._exception:
-            info.append('e=%r' % self._exception)
+            info.append(f'exception={self._exception!r}')
         if self._transport:
-            info.append('t=%r' % self._transport)
+            info.append(f'transport={self._transport!r}')
         if self._paused:
             info.append('paused')
-        return '<%s>' % ' '.join(info)
+        return '<{}>'.format(' '.join(info))
 
     def exception(self):
         return self._exception
@@ -440,8 +438,9 @@ def feed_data(self, data):
         # would have an unexpected behaviour. It would not be possible to know
         # which coroutine would get the next data.
         if self._waiter is not None:
-            raise RuntimeError('%s() called while another coroutine is '
-                               'already waiting for incoming data' % func_name)
+            raise RuntimeError(
+                f'{func_name}() called while another coroutine is '
+                f'already waiting for incoming data')
 
         assert not self._eof, '_wait_for_data after EOF'
 
diff --git a/Lib/asyncio/subprocess.py b/Lib/asyncio/subprocess.py
index dd3d10c8879..90fc00de833 100644
--- a/Lib/asyncio/subprocess.py
+++ b/Lib/asyncio/subprocess.py
@@ -1,4 +1,4 @@
-__all__ = ['create_subprocess_exec', 'create_subprocess_shell']
+__all__ = 'create_subprocess_exec', 'create_subprocess_shell'
 
 import subprocess
 
@@ -29,12 +29,12 @@ def __init__(self, limit, loop):
     def __repr__(self):
         info = [self.__class__.__name__]
         if self.stdin is not None:
-            info.append('stdin=%r' % self.stdin)
+            info.append(f'stdin={self.stdin!r}')
         if self.stdout is not None:
-            info.append('stdout=%r' % self.stdout)
+            info.append(f'stdout={self.stdout!r}')
         if self.stderr is not None:
-            info.append('stderr=%r' % self.stderr)
-        return '<%s>' % ' '.join(info)
+            info.append(f'stderr={self.stderr!r}')
+        return '<{}>'.format(' '.join(info))
 
     def connection_made(self, transport):
         self._transport = transport
@@ -83,7 +83,7 @@ def pipe_connection_lost(self, fd, exc):
             reader = self.stderr
         else:
             reader = None
-        if reader != None:
+        if reader is not None:
             if exc is None:
                 reader.feed_eof()
             else:
@@ -114,7 +114,7 @@ def __init__(self, transport, protocol, loop):
         self.pid = transport.get_pid()
 
     def __repr__(self):
-        return '<%s %s>' % (self.__class__.__name__, self.pid)
+        return f'<{self.__class__.__name__} {self.pid}>'
 
     @property
     def returncode(self):
@@ -137,8 +137,8 @@ def kill(self):
         debug = self._loop.get_debug()
         self.stdin.write(input)
         if debug:
-            logger.debug('%r communicate: feed stdin (%s bytes)',
-                        self, len(input))
+            logger.debug(
+                '%r communicate: feed stdin (%s bytes)', self, len(input))
         try:
             await self.stdin.drain()
         except (BrokenPipeError, ConnectionResetError) as exc:
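
The `reader != None` fix above follows PEP 8's rule that comparisons against singletons use identity. A small self-contained illustration of why equality can mislead (hypothetical class, purely for demonstration):

class AlwaysEqual:
    def __eq__(self, other):
        return True  # Python 3 derives __ne__ as the inverse of this

obj = AlwaysEqual()
print(obj != None)       # False -- an equality hook makes the test unreliable
print(obj is not None)   # True  -- identity check, which is what the code means
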
diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py
index c23d06afd7c..e0af5abdf23 100644
--- a/Lib/asyncio/tasks.py
+++ b/Lib/asyncio/tasks.py
@@ -1,10 +1,11 @@
 """Support for tasks, coroutines and the scheduler."""
 
-__all__ = ['Task',
-           'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
-           'wait', 'wait_for', 'as_completed', 'sleep', 'async',
-           'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe',
-           ]
+__all__ = (
+    'Task',
+    'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
+    'wait', 'wait_for', 'as_completed', 'sleep', 'async',
+    'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe',
+)
 
 import concurrent.futures
 import functools
@@ -158,8 +159,7 @@ def cancel(self):
         return True
 
     def _step(self, exc=None):
-        assert not self.done(), \
-            '_step(): already done: {!r}, {!r}'.format(self, exc)
+        assert not self.done(), f'_step(): already done: {self!r}, {exc!r}'
         if self._must_cancel:
             if not isinstance(exc, futures.CancelledError):
                 exc = futures.CancelledError()
@@ -195,18 +195,15 @@ def _step(self, exc=None):
             if blocking is not None:
                 # Yielded Future must come from Future.__iter__().
                 if result._loop is not self._loop:
-                    self._loop.call_soon(
-                        self._step,
-                        RuntimeError(
-                            'Task {!r} got Future {!r} attached to a '
-                            'different loop'.format(self, result)))
+                    new_exc = RuntimeError(
+                        f'Task {self!r} got Future '
+                        f'{result!r} attached to a different loop')
+                    self._loop.call_soon(self._step, new_exc)
                 elif blocking:
                     if result is self:
-                        self._loop.call_soon(
-                            self._step,
-                            RuntimeError(
-                                'Task cannot await on itself: {!r}'.format(
-                                    self)))
+                        new_exc = RuntimeError(
+                            f'Task cannot await on itself: {self!r}')
+                        self._loop.call_soon(self._step, new_exc)
                     else:
                         result._asyncio_future_blocking = False
                         result.add_done_callback(self._wakeup)
@@ -215,28 +212,24 @@ def _step(self, exc=None):
                             if self._fut_waiter.cancel():
                                 self._must_cancel = False
                 else:
-                    self._loop.call_soon(
-                        self._step,
-                        RuntimeError(
-                            'yield was used instead of yield from '
-                            'in task {!r} with {!r}'.format(self, result)))
+                    new_exc = RuntimeError(
+                        f'yield was used instead of yield from '
+                        f'in task {self!r} with {result!r}')
+                    self._loop.call_soon(self._step, new_exc)
+
             elif result is None:
                 # Bare yield relinquishes control for one event loop iteration.
                 self._loop.call_soon(self._step)
             elif inspect.isgenerator(result):
                 # Yielding a generator is just wrong.
-                self._loop.call_soon(
-                    self._step,
-                    RuntimeError(
-                        'yield was used instead of yield from for '
-                        'generator in task {!r} with {}'.format(
-                            self, result)))
+                new_exc = RuntimeError(
+                    f'yield was used instead of yield from for '
+                    f'generator in task {self!r} with {result}')
+                self._loop.call_soon(self._step, new_exc)
             else:
                 # Yielding something else is an error.
-                self._loop.call_soon(
-                    self._step,
-                    RuntimeError(
-                        'Task got bad yield: {!r}'.format(result)))
+                new_exc = RuntimeError(f'Task got bad yield: {result!r}')
+                self._loop.call_soon(self._step, new_exc)
         finally:
             self.__class__._current_tasks.pop(self._loop)
             self = None  # Needed to break cycles when an exception occurs.
@@ -294,11 +287,11 @@ def _wakeup(self, future):
     when the timeout occurs are returned in the second set.
     """
     if futures.isfuture(fs) or coroutines.iscoroutine(fs):
-        raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
+        raise TypeError(f"expect a list of futures, not {type(fs).__name__}")
     if not fs:
         raise ValueError('Set of coroutines/Futures is empty.')
     if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
-        raise ValueError('Invalid return_when value: {}'.format(return_when))
+        raise ValueError(f'Invalid return_when value: {return_when}')
 
     if loop is None:
         loop = events.get_event_loop()
@@ -430,7 +423,7 @@ def as_completed(fs, *, loop=None, timeout=None):
     Note: The futures 'f' are not necessarily members of fs.
     """
     if futures.isfuture(fs) or coroutines.iscoroutine(fs):
-        raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
+        raise TypeError(f"expect a list of futures, not {type(fs).__name__}")
     loop = loop if loop is not None else events.get_event_loop()
     todo = {ensure_future(f, loop=loop) for f in set(fs)}
     from .queues import Queue  # Import here to avoid circular import problem.
diff --git a/Lib/asyncio/transports.py b/Lib/asyncio/transports.py
index a94079f433a..51f56737c67 100644
--- a/Lib/asyncio/transports.py
+++ b/Lib/asyncio/transports.py
@@ -1,8 +1,9 @@
 """Abstract Transport class."""
 
-__all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport',
-           'Transport', 'DatagramTransport', 'SubprocessTransport',
-           ]
+__all__ = (
+    'BaseTransport', 'ReadTransport', 'WriteTransport',
+    'Transport', 'DatagramTransport', 'SubprocessTransport',
+)
 
 
 class BaseTransport:
@@ -267,7 +268,7 @@ def _maybe_pause_protocol(self):
 
     def _maybe_resume_protocol(self):
         if (self._protocol_paused and
-            self.get_write_buffer_size() <= self._low_water):
+                self.get_write_buffer_size() <= self._low_water):
             self._protocol_paused = False
             try:
                 self._protocol.resume_writing()
@@ -285,14 +286,16 @@ def get_write_buffer_limits(self):
     def _set_write_buffer_limits(self, high=None, low=None):
         if high is None:
             if low is None:
-                high = 64*1024
+                high = 64 * 1024
             else:
-                high = 4*low
+                high = 4 * low
         if low is None:
             low = high // 4
+
         if not high >= low >= 0:
-            raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
-                             (high, low))
+            raise ValueError(
+                f'high ({high!r}) must be >= low ({low!r}) must be >= 0')
+
         self._high_water = high
         self._low_water = low
 
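The rewritten water-mark check above relies on a chained comparison. A standalone sketch of the same logic (a hypothetical helper, not the actual method):

def _check_limits(high=None, low=None):
    # Defaults mirror _set_write_buffer_limits() above.
    if high is None:
        high = 64 * 1024 if low is None else 4 * low
    if low is None:
        low = high // 4
    if not high >= low >= 0:
        raise ValueError(
            f'high ({high!r}) must be >= low ({low!r}) must be >= 0')
    return high, low

print(_check_limits())          # (65536, 16384)
print(_check_limits(low=100))   # (400, 100)
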
diff --git a/Lib/asyncio/unix_events.py b/Lib/asyncio/unix_events.py
index 0308b02a52d..b9bdf8786f2 100644
--- a/Lib/asyncio/unix_events.py
+++ b/Lib/asyncio/unix_events.py
@@ -23,10 +23,12 @@
 from .log import logger
 
 
-__all__ = ['SelectorEventLoop',
-           'AbstractChildWatcher', 'SafeChildWatcher',
-           'FastChildWatcher', 'DefaultEventLoopPolicy',
-           ]
+__all__ = (
+    'SelectorEventLoop',
+    'AbstractChildWatcher', 'SafeChildWatcher',
+    'FastChildWatcher', 'DefaultEventLoopPolicy',
+)
+
 
 if sys.platform == 'win32':  # pragma: no cover
     raise ImportError('Signals are not really supported on Windows')
@@ -65,8 +67,8 @@ def add_signal_handler(self, sig, callback, *args):
         Raise ValueError if the signal number is invalid or uncatchable.
         Raise RuntimeError if there is a problem setting up the handler.
         """
-        if (coroutines.iscoroutine(callback)
-                or coroutines.iscoroutinefunction(callback)):
+        if (coroutines.iscoroutine(callback) or
+                coroutines.iscoroutinefunction(callback)):
             raise TypeError("coroutines cannot be used "
                             "with add_signal_handler()")
         self._check_signal(sig)
@@ -100,7 +102,7 @@ def add_signal_handler(self, sig, callback, *args):
                     logger.info('set_wakeup_fd(-1) failed: %s', nexc)
 
             if exc.errno == errno.EINVAL:
-                raise RuntimeError('sig {} cannot be caught'.format(sig))
+                raise RuntimeError(f'sig {sig} cannot be caught')
             else:
                 raise
 
@@ -134,7 +136,7 @@ def remove_signal_handler(self, sig):
             signal.signal(sig, handler)
         except OSError as exc:
             if exc.errno == errno.EINVAL:
-                raise RuntimeError('sig {} cannot be caught'.format(sig))
+                raise RuntimeError(f'sig {sig} cannot be caught')
             else:
                 raise
 
@@ -153,11 +155,10 @@ def _check_signal(self, sig):
         Raise RuntimeError if there is a problem setting up the handler.
         """
         if not isinstance(sig, int):
-            raise TypeError('sig must be an int, not {!r}'.format(sig))
+            raise TypeError(f'sig must be an int, not {sig!r}')
 
         if not (1 <= sig < signal.NSIG):
-            raise ValueError(
-                'sig {} out of range(1, {})'.format(sig, signal.NSIG))
+            raise ValueError(f'sig {sig} out of range(1, {signal.NSIG})')
 
     def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                                   extra=None):
@@ -223,8 +224,7 @@ def _child_watcher_callback(self, pid, returncode, transp):
             if (sock.family != socket.AF_UNIX or
                     not base_events._is_stream_socket(sock)):
                 raise ValueError(
-                    'A UNIX Domain Stream Socket was expected, got {!r}'
-                    .format(sock))
+                    f'A UNIX Domain Stream Socket was expected, got {sock!r}')
             sock.setblocking(False)
 
         transport, protocol = await self._create_connection_transport(
@@ -263,7 +263,7 @@ def _child_watcher_callback(self, pid, returncode, transp):
                 if exc.errno == errno.EADDRINUSE:
                     # Let's improve the error message by adding
                     # with what exact address it occurs.
-                    msg = 'Address {!r} is already in use'.format(path)
+                    msg = f'Address {path!r} is already in use'
                     raise OSError(errno.EADDRINUSE, msg) from None
                 else:
                     raise
@@ -278,8 +278,7 @@ def _child_watcher_callback(self, pid, returncode, transp):
             if (sock.family != socket.AF_UNIX or
                     not base_events._is_stream_socket(sock)):
                 raise ValueError(
-                    'A UNIX Domain Stream Socket was expected, got {!r}'
-                    .format(sock))
+                    f'A UNIX Domain Stream Socket was expected, got {sock!r}')
 
         server = base_events.Server(self, [sock])
         sock.listen(backlog)
@@ -327,12 +326,11 @@ def __repr__(self):
             info.append('closed')
         elif self._closing:
             info.append('closing')
-        info.append('fd=%s' % self._fileno)
+        info.append(f'fd={self._fileno}')
         selector = getattr(self._loop, '_selector', None)
         if self._pipe is not None and selector is not None:
             polling = selector_events._test_selector_event(
-                          selector,
-                          self._fileno, selectors.EVENT_READ)
+                selector, self._fileno, selectors.EVENT_READ)
             if polling:
                 info.append('polling')
             else:
@@ -341,7 +339,7 @@ def __repr__(self):
             info.append('open')
         else:
             info.append('closed')
-        return '<%s>' % ' '.join(info)
+        return '<{}>'.format(' '.join(info))
 
     def _read_ready(self):
         try:
@@ -382,7 +380,7 @@ def close(self):
 
     def __del__(self):
         if self._pipe is not None:
-            warnings.warn("unclosed transport %r" % self, ResourceWarning,
+            warnings.warn(f"unclosed transport {self!r}", ResourceWarning,
                           source=self)
             self._pipe.close()
 
@@ -461,24 +459,23 @@ def __repr__(self):
             info.append('closed')
         elif self._closing:
             info.append('closing')
-        info.append('fd=%s' % self._fileno)
+        info.append(f'fd={self._fileno}')
         selector = getattr(self._loop, '_selector', None)
         if self._pipe is not None and selector is not None:
             polling = selector_events._test_selector_event(
-                          selector,
-                          self._fileno, selectors.EVENT_WRITE)
+                selector, self._fileno, selectors.EVENT_WRITE)
             if polling:
                 info.append('polling')
             else:
                 info.append('idle')
 
             bufsize = self.get_write_buffer_size()
-            info.append('bufsize=%s' % bufsize)
+            info.append(f'bufsize={bufsize}')
         elif self._pipe is not None:
             info.append('open')
         else:
             info.append('closed')
-        return '<%s>' % ' '.join(info)
+        return '<{}>'.format(' '.join(info))
 
     def get_write_buffer_size(self):
         return len(self._buffer)
@@ -579,7 +576,7 @@ def close(self):
 
     def __del__(self):
         if self._pipe is not None:
-            warnings.warn("unclosed transport %r" % self, ResourceWarning,
+            warnings.warn(f"unclosed transport {self!r}", ResourceWarning,
                           source=self)
             self._pipe.close()
 
@@ -1007,5 +1004,6 @@ def set_child_watcher(self, watcher):
 
         self._watcher = watcher
 
+
 SelectorEventLoop = _UnixSelectorEventLoop
 DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy
diff --git a/Lib/asyncio/windows_events.py b/Lib/asyncio/windows_events.py
index 95b12a11a62..e18fd392ba2 100644
--- a/Lib/asyncio/windows_events.py
+++ b/Lib/asyncio/windows_events.py
@@ -18,9 +18,10 @@
 from .log import logger
 
 
-__all__ = ['SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
-           'DefaultEventLoopPolicy',
-           ]
+__all__ = (
+    'SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
+    'DefaultEventLoopPolicy',
+)
 
 
 NULL = 0
@@ -51,7 +52,7 @@ def _repr_info(self):
         info = super()._repr_info()
         if self._ov is not None:
             state = 'pending' if self._ov.pending else 'completed'
-            info.insert(1, 'overlapped=<%s, %#x>' % (state, self._ov.address))
+            info.insert(1, f'overlapped=<{state}, {self._ov.address:#x}>')
         return info
 
     def _cancel_overlapped(self):
@@ -107,12 +108,12 @@ def _poll(self):
 
     def _repr_info(self):
         info = super()._repr_info()
-        info.append('handle=%#x' % self._handle)
+        info.append(f'handle={self._handle:#x}')
         if self._handle is not None:
             state = 'signaled' if self._poll() else 'waiting'
             info.append(state)
         if self._wait_handle is not None:
-            info.append('wait_handle=%#x' % self._wait_handle)
+            info.append(f'wait_handle={self._wait_handle:#x}')
         return info
 
     def _unregister_wait_cb(self, fut):
@@ -543,9 +544,9 @@ def finish_accept_pipe(trans, key, ov):
     async def connect_pipe(self, address):
         delay = CONNECT_PIPE_INIT_DELAY
         while True:
-            # Unfortunately there is no way to do an overlapped connect to a pipe.
-            # Call CreateFile() in a loop until it doesn't fail with
-            # ERROR_PIPE_BUSY
+            # Unfortunately there is no way to do an overlapped connect to
+            # a pipe.  Call CreateFile() in a loop until it doesn't fail with
+            # ERROR_PIPE_BUSY.
             try:
                 handle = _overlapped.ConnectPipe(address)
                 break
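
The overlapped/handle reprs above swap `%#x` for the equivalent f-string format spec. A quick check that the two spellings agree (illustrative value):

address = 0x1a2b
print('overlapped=<%s, %#x>' % ('pending', address))  # overlapped=<pending, 0x1a2b>
print(f'overlapped=<pending, {address:#x}>')          # overlapped=<pending, 0x1a2b>
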
diff --git a/Lib/asyncio/windows_utils.py b/Lib/asyncio/windows_utils.py
index 3b410976f9d..9e22f6e0740 100644
--- a/Lib/asyncio/windows_utils.py
+++ b/Lib/asyncio/windows_utils.py
@@ -1,6 +1,4 @@
-"""
-Various Windows specific bits and pieces
-"""
+"""Various Windows specific bits and pieces."""
 
 import sys
 
@@ -11,13 +9,12 @@
 import itertools
 import msvcrt
 import os
-import socket
 import subprocess
 import tempfile
 import warnings
 
 
-__all__ = ['pipe', 'Popen', 'PIPE', 'PipeHandle']
+__all__ = 'pipe', 'Popen', 'PIPE', 'PipeHandle'
 
 
 # Constants/globals
@@ -34,8 +31,9 @@
 
 def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
     """Like os.pipe() but with overlapped support and using handles not fds."""
-    address = tempfile.mktemp(prefix=r'\\.\pipe\python-pipe-%d-%d-' %
-                              (os.getpid(), next(_mmap_counter)))
+    address = tempfile.mktemp(
+        prefix=r'\\.\pipe\python-pipe-{:d}-{:d}-'.format(
+            os.getpid(), next(_mmap_counter)))
 
     if duplex:
         openmode = _winapi.PIPE_ACCESS_DUPLEX
@@ -90,10 +88,10 @@ def __init__(self, handle):
 
     def __repr__(self):
         if self._handle is not None:
-            handle = 'handle=%r' % self._handle
+            handle = f'handle={self._handle!r}'
         else:
             handle = 'closed'
-        return '<%s %s>' % (self.__class__.__name__, handle)
+        return f'<{self.__class__.__name__} {handle}>'
 
     @property
     def handle(self):
@@ -111,7 +109,7 @@ def close(self, *, CloseHandle=_winapi.CloseHandle):
 
     def __del__(self):
         if self._handle is not None:
-            warnings.warn("unclosed %r" % self, ResourceWarning,
+            warnings.warn(f"unclosed {self!r}", ResourceWarning,
                           source=self)
             self.close()
 
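The pipe-name prefix above moves from %-interpolation to str.format; both spellings produce identical strings (illustrative pid and counter values, not the real ones):

pid, counter = 4242, 7
old = r'\\.\pipe\python-pipe-%d-%d-' % (pid, counter)
new = r'\\.\pipe\python-pipe-{:d}-{:d}-'.format(pid, counter)
print(old == new)   # True
print(new)          # \\.\pipe\python-pipe-4242-7-
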
diff --git a/Lib/test/test_asyncio/test_locks.py b/Lib/test/test_asyncio/test_locks.py
index f365a45106e..78d80ecf4f3 100644
--- a/Lib/test/test_asyncio/test_locks.py
+++ b/Lib/test/test_asyncio/test_locks.py
@@ -10,7 +10,7 @@
 STR_RGX_REPR = (
     r'^<(?P<class>.*?) object at (?P<address>.*?)'
     r'\[(?P<extras>'
-    r'(set|unset|locked|unlocked)(,value:\d)?(,waiters:\d+)?'
+    r'(set|unset|locked|unlocked)(, value:\d)?(, waiters:\d+)?'
     r')\]>\Z'
 )
 RGX_REPR = re.compile(STR_RGX_REPR)
@@ -760,7 +760,7 @@ def test_initial_value_zero(self):
 
     def test_repr(self):
         sem = asyncio.Semaphore(loop=self.loop)
-        self.assertTrue(repr(sem).endswith('[unlocked,value:1]>'))
+        self.assertTrue(repr(sem).endswith('[unlocked, value:1]>'))
         self.assertTrue(RGX_REPR.match(repr(sem)))
 
         self.loop.run_until_complete(sem.acquire())
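
The updated STR_RGX_REPR above now expects a space after each comma, matching the new lock/semaphore reprs. A quick self-contained check (the sample address is made up):

import re

STR_RGX_REPR = (
    r'^<(?P<class>.*?) object at (?P<address>.*?)'
    r'\[(?P<extras>'
    r'(set|unset|locked|unlocked)(, value:\d)?(, waiters:\d+)?'
    r')\]>\Z'
)
sample = '<asyncio.locks.Semaphore object at 0x7f0000000000 [unlocked, value:1]>'
print(re.match(STR_RGX_REPR, sample) is not None)   # True
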
diff --git a/Lib/test/test_asyncio/test_streams.py b/Lib/test/test_asyncio/test_streams.py
index 2f4e6d23bf9..1927d731281 100644
--- a/Lib/test/test_asyncio/test_streams.py
+++ b/Lib/test/test_asyncio/test_streams.py
@@ -806,7 +806,7 @@ def test___repr__(self):
 
     def test___repr__nondefault_limit(self):
         stream = asyncio.StreamReader(loop=self.loop, limit=123)
-        self.assertEqual("<StreamReader l=123>", repr(stream))
+        self.assertEqual("<StreamReader limit=123>", repr(stream))
 
     def test___repr__eof(self):
         stream = asyncio.StreamReader(loop=self.loop)
@@ -822,14 +822,15 @@ def test___repr__exception(self):
         stream = asyncio.StreamReader(loop=self.loop)
         exc = RuntimeError()
         stream.set_exception(exc)
-        self.assertEqual("<StreamReader e=RuntimeError()>", repr(stream))
+        self.assertEqual("<StreamReader exception=RuntimeError()>",
+                         repr(stream))
 
     def test___repr__waiter(self):
         stream = asyncio.StreamReader(loop=self.loop)
         stream._waiter = asyncio.Future(loop=self.loop)
         self.assertRegex(
             repr(stream),
-            r"<StreamReader w=<Future pending[\S ]*>>")
+            r"<StreamReader waiter=<Future pending[\S ]*>>")
         stream._waiter.set_result(None)
         self.loop.run_until_complete(stream._waiter)
         stream._waiter = None
@@ -840,7 +841,7 @@ def test___repr__transport(self):
         stream._transport = mock.Mock()
         stream._transport.__repr__ = mock.Mock()
         stream._transport.__repr__.return_value = "<Transport>"
-        self.assertEqual("<StreamReader t=<Transport>>", repr(stream))
+        self.assertEqual("<StreamReader transport=<Transport>>", repr(stream))
 
     def test_IncompleteReadError_pickleable(self):
         e = asyncio.IncompleteReadError(b'abc', 10)


