[pypy-commit] pypy py3.5: Add f-strings to PyPy 3.5. In CPython they are only there from 3.6, but

arigo pypy.commits at gmail.com
Tue Jan 24 11:32:31 EST 2017


Author: Armin Rigo <arigo at tunes.org>
Branch: py3.5
Changeset: r89749:a19390150925
Date: 2017-01-24 17:31 +0100
http://bitbucket.org/pypy/pypy/changeset/a19390150925/

Log:	Add f-strings to PyPy 3.5. In CPython they are only there from 3.6,
	but the idea is that if it is the only new feature that you really
	need to run your Python 3.x programs, then using PyPy 3.5 should
	work too.

	For people that, for some reason, think f-strings are a security
	issue and would like to disable them, translate with
	--no-objspace-fstrings.

diff --git a/lib-python/3/opcode.py b/lib-python/3/opcode.py
--- a/lib-python/3/opcode.py
+++ b/lib-python/3/opcode.py
@@ -214,6 +214,9 @@
 def_op('BUILD_TUPLE_UNPACK', 152)
 def_op('BUILD_SET_UNPACK', 153)
 
+def_op('FORMAT_VALUE', 155)   # in CPython 3.6, but available in PyPy from 3.5
+def_op('BUILD_STRING', 157)   # in CPython 3.6, but available in PyPy from 3.5
+
 # pypy modification, experimental bytecode
 def_op('LOOKUP_METHOD', 201)          # Index in name list
 hasname.append(201)
diff --git a/lib-python/3/test/test_fstring.py b/lib-python/3/test/test_fstring.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3/test/test_fstring.py
@@ -0,0 +1,762 @@
+# This test file is from CPython 3.6.0
+
+import ast
+import types
+import decimal
+import unittest
+
+a_global = 'global variable'
+
+# You could argue that I'm too strict in looking for specific error
+#  values with assertRaisesRegex, but without it it's way too easy to
+#  make a syntax error in the test strings. Especially with all of the
+#  triple quotes, raw strings, backslashes, etc. I think it's a
+#  worthwhile tradeoff. When I switched to this method, I found many
+#  examples where I wasn't testing what I thought I was.
+
+class TestCase(unittest.TestCase):
+    def assertAllRaise(self, exception_type, regex, error_strings):
+        for str in error_strings:
+            with self.subTest(str=str):
+                with self.assertRaisesRegex(exception_type, regex):
+                    eval(str)
+
+    def test__format__lookup(self):
+        # Make sure __format__ is looked up on the type, not the instance.
+        class X:
+            def __format__(self, spec):
+                return 'class'
+
+        x = X()
+
+        # Add a bound __format__ method to the 'y' instance, but not
+        #  the 'x' instance.
+        y = X()
+        y.__format__ = types.MethodType(lambda self, spec: 'instance', y)
+
+        self.assertEqual(f'{y}', format(y))
+        self.assertEqual(f'{y}', 'class')
+        self.assertEqual(format(x), format(y))
+
+        # __format__ is not called this way, but still make sure it
+        #  returns what we expect (so we can make sure we're bypassing
+        #  it).
+        self.assertEqual(x.__format__(''), 'class')
+        self.assertEqual(y.__format__(''), 'instance')
+
+        # This is how __format__ is actually called.
+        self.assertEqual(type(x).__format__(x, ''), 'class')
+        self.assertEqual(type(y).__format__(y, ''), 'class')
+
+    def test_ast(self):
+        # Inspired by http://bugs.python.org/issue24975
+        class X:
+            def __init__(self):
+                self.called = False
+            def __call__(self):
+                self.called = True
+                return 4
+        x = X()
+        expr = """
+a = 10
+f'{a * x()}'"""
+        t = ast.parse(expr)
+        c = compile(t, '', 'exec')
+
+        # Make sure x was not called.
+        self.assertFalse(x.called)
+
+        # Actually run the code.
+        exec(c)
+
+        # Make sure x was called.
+        self.assertTrue(x.called)
+
+    def test_literal_eval(self):
+        # With no expressions, an f-string is okay.
+        self.assertEqual(ast.literal_eval("f'x'"), 'x')
+        self.assertEqual(ast.literal_eval("f'x' 'y'"), 'xy')
+
+        # But this should raise an error.
+        with self.assertRaisesRegex(ValueError, 'malformed node or string'):
+            ast.literal_eval("f'x{3}'")
+
+        # As should this, which uses a different ast node
+        with self.assertRaisesRegex(ValueError, 'malformed node or string'):
+            ast.literal_eval("f'{3}'")
+
+    def test_ast_compile_time_concat(self):
+        x = ['']
+
+        expr = """x[0] = 'foo' f'{3}'"""
+        t = ast.parse(expr)
+        c = compile(t, '', 'exec')
+        exec(c)
+        self.assertEqual(x[0], 'foo3')
+
+    def test_compile_time_concat_errors(self):
+        self.assertAllRaise(SyntaxError,
+                            'cannot mix bytes and nonbytes literals',
+                            [r"""f'' b''""",
+                             r"""b'' f''""",
+                             ])
+
+    def test_literal(self):
+        self.assertEqual(f'', '')
+        self.assertEqual(f'a', 'a')
+        self.assertEqual(f' ', ' ')
+
+    def test_unterminated_string(self):
+        self.assertAllRaise(SyntaxError, 'f-string: unterminated string',
+                            [r"""f'{"x'""",
+                             r"""f'{"x}'""",
+                             r"""f'{("x'""",
+                             r"""f'{("x}'""",
+                             ])
+
+    def test_mismatched_parens(self):
+        self.assertAllRaise(SyntaxError, 'f-string: mismatched',
+                            ["f'{((}'",
+                             ])
+
+    def test_double_braces(self):
+        self.assertEqual(f'{{', '{')
+        self.assertEqual(f'a{{', 'a{')
+        self.assertEqual(f'{{b', '{b')
+        self.assertEqual(f'a{{b', 'a{b')
+        self.assertEqual(f'}}', '}')
+        self.assertEqual(f'a}}', 'a}')
+        self.assertEqual(f'}}b', '}b')
+        self.assertEqual(f'a}}b', 'a}b')
+        self.assertEqual(f'{{}}', '{}')
+        self.assertEqual(f'a{{}}', 'a{}')
+        self.assertEqual(f'{{b}}', '{b}')
+        self.assertEqual(f'{{}}c', '{}c')
+        self.assertEqual(f'a{{b}}', 'a{b}')
+        self.assertEqual(f'a{{}}c', 'a{}c')
+        self.assertEqual(f'{{b}}c', '{b}c')
+        self.assertEqual(f'a{{b}}c', 'a{b}c')
+
+        self.assertEqual(f'{{{10}', '{10')
+        self.assertEqual(f'}}{10}', '}10')
+        self.assertEqual(f'}}{{{10}', '}{10')
+        self.assertEqual(f'}}a{{{10}', '}a{10')
+
+        self.assertEqual(f'{10}{{', '10{')
+        self.assertEqual(f'{10}}}', '10}')
+        self.assertEqual(f'{10}}}{{', '10}{')
+        self.assertEqual(f'{10}}}a{{' '}', '10}a{}')
+
+        # Inside of strings, don't interpret doubled brackets.
+        self.assertEqual(f'{"{{}}"}', '{{}}')
+
+        self.assertAllRaise(TypeError, 'unhashable type',
+                            ["f'{ {{}} }'", # dict in a set
+                             ])
+
+    def test_compile_time_concat(self):
+        x = 'def'
+        self.assertEqual('abc' f'## {x}ghi', 'abc## defghi')
+        self.assertEqual('abc' f'{x}' 'ghi', 'abcdefghi')
+        self.assertEqual('abc' f'{x}' 'gh' f'i{x:4}', 'abcdefghidef ')
+        self.assertEqual('{x}' f'{x}', '{x}def')
+        self.assertEqual('{x' f'{x}', '{xdef')
+        self.assertEqual('{x}' f'{x}', '{x}def')
+        self.assertEqual('{{x}}' f'{x}', '{{x}}def')
+        self.assertEqual('{{x' f'{x}', '{{xdef')
+        self.assertEqual('x}}' f'{x}', 'x}}def')
+        self.assertEqual(f'{x}' 'x}}', 'defx}}')
+        self.assertEqual(f'{x}' '', 'def')
+        self.assertEqual('' f'{x}' '', 'def')
+        self.assertEqual('' f'{x}', 'def')
+        self.assertEqual(f'{x}' '2', 'def2')
+        self.assertEqual('1' f'{x}' '2', '1def2')
+        self.assertEqual('1' f'{x}', '1def')
+        self.assertEqual(f'{x}' f'-{x}', 'def-def')
+        self.assertEqual('' f'', '')
+        self.assertEqual('' f'' '', '')
+        self.assertEqual('' f'' '' f'', '')
+        self.assertEqual(f'', '')
+        self.assertEqual(f'' '', '')
+        self.assertEqual(f'' '' f'', '')
+        self.assertEqual(f'' '' f'' '', '')
+
+        self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
+                            ["f'{3' f'}'",  # can't concat to get a valid f-string
+                             ])
+
+    def test_comments(self):
+        # These aren't comments, since they're in strings.
+        d = {'#': 'hash'}
+        self.assertEqual(f'{"#"}', '#')
+        self.assertEqual(f'{d["#"]}', 'hash')
+
+        self.assertAllRaise(SyntaxError, "f-string expression part cannot include '#'",
+                            ["f'{1#}'",   # error because the expression becomes "(1#)"
+                             "f'{3(#)}'",
+                             "f'{#}'",
+                             "f'{)#}'",   # When wrapped in parens, this becomes
+                                          #  '()#)'.  Make sure that doesn't compile.
+                             ])
+
+    def test_many_expressions(self):
+        # Create a string with many expressions in it. Note that
+        #  because we have a space in here as a literal, we're actually
+        #  going to use twice as many ast nodes: one for each literal
+        #  plus one for each expression.
+        def build_fstr(n, extra=''):
+            return "f'" + ('{x} ' * n) + extra + "'"
+
+        x = 'X'
+        width = 1
+
+        # Test around 256.
+        for i in range(250, 260):
+            self.assertEqual(eval(build_fstr(i)), (x+' ')*i)
+
+        # Test concatenating 2 large f-strings.
+        self.assertEqual(eval(build_fstr(255)*256), (x+' ')*(255*256))
+
+        s = build_fstr(253, '{x:{width}} ')
+        self.assertEqual(eval(s), (x+' ')*254)
+
+        # Test lots of expressions and constants, concatenated.
+        s = "f'{1}' 'x' 'y'" * 1024
+        self.assertEqual(eval(s), '1xy' * 1024)
+
+    def test_format_specifier_expressions(self):
+        width = 10
+        precision = 4
+        value = decimal.Decimal('12.34567')
+        self.assertEqual(f'result: {value:{width}.{precision}}', 'result:      12.35')
+        self.assertEqual(f'result: {value:{width!r}.{precision}}', 'result:      12.35')
+        self.assertEqual(f'result: {value:{width:0}.{precision:1}}', 'result:      12.35')
+        self.assertEqual(f'result: {value:{1}{0:0}.{precision:1}}', 'result:      12.35')
+        self.assertEqual(f'result: {value:{ 1}{ 0:0}.{ precision:1}}', 'result:      12.35')
+        self.assertEqual(f'{10:#{1}0x}', '       0xa')
+        self.assertEqual(f'{10:{"#"}1{0}{"x"}}', '       0xa')
+        self.assertEqual(f'{-10:-{"#"}1{0}x}', '      -0xa')
+        self.assertEqual(f'{-10:{"-"}#{1}0{"x"}}', '      -0xa')
+        self.assertEqual(f'{10:#{3 != {4:5} and width}x}', '       0xa')
+
+        self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
+                            ["""f'{"s"!r{":10"}}'""",
+
+                             # This looks like a nested format spec.
+                             ])
+
+        self.assertAllRaise(SyntaxError, "invalid syntax",
+                            [# Invalid syntax inside a nested spec.
+                             "f'{4:{/5}}'",
+                             ])
+
+        self.assertAllRaise(SyntaxError, "f-string: expressions nested too deeply",
+                            [# Can't nest format specifiers.
+                             "f'result: {value:{width:{0}}.{precision:1}}'",
+                             ])
+
+        self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
+                            [# No expansion inside conversion or for
+                             #  the : or ! itself.
+                             """f'{"s"!{"r"}}'""",
+                             ])
+
+    def test_side_effect_order(self):
+        class X:
+            def __init__(self):
+                self.i = 0
+            def __format__(self, spec):
+                self.i += 1
+                return str(self.i)
+
+        x = X()
+        self.assertEqual(f'{x} {x}', '1 2')
+
+    def test_missing_expression(self):
+        self.assertAllRaise(SyntaxError, 'f-string: empty expression not allowed',
+                            ["f'{}'",
+                             "f'{ }'"
+                             "f' {} '",
+                             "f'{!r}'",
+                             "f'{ !r}'",
+                             "f'{10:{ }}'",
+                             "f' { } '",
+
+                             # Catch the empty expression before the
+                             #  invalid conversion.
+                             "f'{!x}'",
+                             "f'{ !xr}'",
+                             "f'{!x:}'",
+                             "f'{!x:a}'",
+                             "f'{ !xr:}'",
+                             "f'{ !xr:a}'",
+
+                             "f'{!}'",
+                             "f'{:}'",
+
+                             # We find the empty expression before the
+                             #  missing closing brace.
+                             "f'{!'",
+                             "f'{!s:'",
+                             "f'{:'",
+                             "f'{:x'",
+                             ])
+
+    def test_parens_in_expressions(self):
+        self.assertEqual(f'{3,}', '(3,)')
+
+        # Add these because when an expression is evaluated, parens
+        #  are added around it. But we shouldn't go from an invalid
+        #  expression to a valid one. The added parens are just
+        #  supposed to allow whitespace (including newlines).
+        self.assertAllRaise(SyntaxError, 'invalid syntax',
+                            ["f'{,}'",
+                             "f'{,}'",  # this is (,), which is an error
+                             ])
+
+        self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
+                            ["f'{3)+(4}'",
+                             ])
+
+        self.assertAllRaise(SyntaxError, 'EOL while scanning string literal',
+                            ["f'{\n}'",
+                             ])
+
+    def test_backslashes_in_string_part(self):
+        self.assertEqual(f'\t', '\t')
+        self.assertEqual(r'\t', '\\t')
+        self.assertEqual(rf'\t', '\\t')
+        self.assertEqual(f'{2}\t', '2\t')
+        self.assertEqual(f'{2}\t{3}', '2\t3')
+        self.assertEqual(f'\t{3}', '\t3')
+
+        self.assertEqual(f'\u0394', '\u0394')
+        self.assertEqual(r'\u0394', '\\u0394')
+        self.assertEqual(rf'\u0394', '\\u0394')
+        self.assertEqual(f'{2}\u0394', '2\u0394')
+        self.assertEqual(f'{2}\u0394{3}', '2\u03943')
+        self.assertEqual(f'\u0394{3}', '\u03943')
+
+        self.assertEqual(f'\U00000394', '\u0394')
+        self.assertEqual(r'\U00000394', '\\U00000394')
+        self.assertEqual(rf'\U00000394', '\\U00000394')
+        self.assertEqual(f'{2}\U00000394', '2\u0394')
+        self.assertEqual(f'{2}\U00000394{3}', '2\u03943')
+        self.assertEqual(f'\U00000394{3}', '\u03943')
+
+        self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}', '\u0394')
+        self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
+        self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}{3}', '2\u03943')
+        self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}{3}', '\u03943')
+        self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
+        self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}3', '2\u03943')
+        self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}3', '\u03943')
+
+        self.assertEqual(f'\x20', ' ')
+        self.assertEqual(r'\x20', '\\x20')
+        self.assertEqual(rf'\x20', '\\x20')
+        self.assertEqual(f'{2}\x20', '2 ')
+        self.assertEqual(f'{2}\x20{3}', '2 3')
+        self.assertEqual(f'\x20{3}', ' 3')
+
+        self.assertEqual(f'2\x20', '2 ')
+        self.assertEqual(f'2\x203', '2 3')
+        self.assertEqual(f'\x203', ' 3')
+
+    def test_misformed_unicode_character_name(self):
+        # These tests are needed because unicode names are parsed
+        # differently inside f-strings.
+        self.assertAllRaise(SyntaxError, r"\(unicode error\) 'unicodeescape' codec can't decode bytes in position .*: malformed \\N character escape",
+                            [r"f'\N'",
+                             r"f'\N{'",
+                             r"f'\N{GREEK CAPITAL LETTER DELTA'",
+
+                             # Here are the non-f-string versions,
+                             #  which should give the same errors.
+                             r"'\N'",
+                             r"'\N{'",
+                             r"'\N{GREEK CAPITAL LETTER DELTA'",
+                             ])
+
+    def test_no_backslashes_in_expression_part(self):
+        self.assertAllRaise(SyntaxError, 'f-string expression part cannot include a backslash',
+                            [r"f'{\'a\'}'",
+                             r"f'{\t3}'",
+                             r"f'{\}'",
+                             r"rf'{\'a\'}'",
+                             r"rf'{\t3}'",
+                             r"rf'{\}'",
+                             r"""rf'{"\N{LEFT CURLY BRACKET}"}'""",
+                             r"f'{\n}'",
+                             ])
+
+    def test_no_escapes_for_braces(self):
+        """
+        Only literal curly braces begin an expression.
+        """
+        # \x7b is '{'.
+        self.assertEqual(f'\x7b1+1}}', '{1+1}')
+        self.assertEqual(f'\x7b1+1', '{1+1')
+        self.assertEqual(f'\u007b1+1', '{1+1')
+        self.assertEqual(f'\N{LEFT CURLY BRACKET}1+1\N{RIGHT CURLY BRACKET}', '{1+1}')
+
+    def test_newlines_in_expressions(self):
+        self.assertEqual(f'{0}', '0')
+        self.assertEqual(rf'''{3+
+4}''', '7')
+
+    def test_lambda(self):
+        x = 5
+        self.assertEqual(f'{(lambda y:x*y)("8")!r}', "'88888'")
+        self.assertEqual(f'{(lambda y:x*y)("8")!r:10}', "'88888'   ")
+        self.assertEqual(f'{(lambda y:x*y)("8"):10}', "88888     ")
+
+        # lambda doesn't work without parens, because the colon
+        #  makes the parser think it's a format_spec
+        self.assertAllRaise(SyntaxError, 'unexpected EOF while parsing',
+                            ["f'{lambda x:x}'",
+                             ])
+
+    def test_yield(self):
+        # Not terribly useful, but make sure the yield turns
+        #  a function into a generator
+        def fn(y):
+            f'y:{yield y*2}'
+
+        g = fn(4)
+        self.assertEqual(next(g), 8)
+
+    def test_yield_send(self):
+        def fn(x):
+            yield f'x:{yield (lambda i: x * i)}'
+
+        g = fn(10)
+        the_lambda = next(g)
+        self.assertEqual(the_lambda(4), 40)
+        self.assertEqual(g.send('string'), 'x:string')
+
+    def test_expressions_with_triple_quoted_strings(self):
+        self.assertEqual(f"{'''x'''}", 'x')
+        self.assertEqual(f"{'''eric's'''}", "eric's")
+
+        # Test concatenation within an expression
+        self.assertEqual(f'{"x" """eric"s""" "y"}', 'xeric"sy')
+        self.assertEqual(f'{"x" """eric"s"""}', 'xeric"s')
+        self.assertEqual(f'{"""eric"s""" "y"}', 'eric"sy')
+        self.assertEqual(f'{"""x""" """eric"s""" "y"}', 'xeric"sy')
+        self.assertEqual(f'{"""x""" """eric"s""" """y"""}', 'xeric"sy')
+        self.assertEqual(f'{r"""x""" """eric"s""" """y"""}', 'xeric"sy')
+
+    def test_multiple_vars(self):
+        x = 98
+        y = 'abc'
+        self.assertEqual(f'{x}{y}', '98abc')
+
+        self.assertEqual(f'X{x}{y}', 'X98abc')
+        self.assertEqual(f'{x}X{y}', '98Xabc')
+        self.assertEqual(f'{x}{y}X', '98abcX')
+
+        self.assertEqual(f'X{x}Y{y}', 'X98Yabc')
+        self.assertEqual(f'X{x}{y}Y', 'X98abcY')
+        self.assertEqual(f'{x}X{y}Y', '98XabcY')
+
+        self.assertEqual(f'X{x}Y{y}Z', 'X98YabcZ')
+
+    def test_closure(self):
+        def outer(x):
+            def inner():
+                return f'x:{x}'
+            return inner
+
+        self.assertEqual(outer('987')(), 'x:987')
+        self.assertEqual(outer(7)(), 'x:7')
+
+    def test_arguments(self):
+        y = 2
+        def f(x, width):
+            return f'x={x*y:{width}}'
+
+        self.assertEqual(f('foo', 10), 'x=foofoo    ')
+        x = 'bar'
+        self.assertEqual(f(10, 10), 'x=        20')
+
+    def test_locals(self):
+        value = 123
+        self.assertEqual(f'v:{value}', 'v:123')
+
+    def test_missing_variable(self):
+        with self.assertRaises(NameError):
+            f'v:{value}'
+
+    def test_missing_format_spec(self):
+        class O:
+            def __format__(self, spec):
+                if not spec:
+                    return '*'
+                return spec
+
+        self.assertEqual(f'{O():x}', 'x')
+        self.assertEqual(f'{O()}', '*')
+        self.assertEqual(f'{O():}', '*')
+
+        self.assertEqual(f'{3:}', '3')
+        self.assertEqual(f'{3!s:}', '3')
+
+    def test_global(self):
+        self.assertEqual(f'g:{a_global}', 'g:global variable')
+        self.assertEqual(f'g:{a_global!r}', "g:'global variable'")
+
+        a_local = 'local variable'
+        self.assertEqual(f'g:{a_global} l:{a_local}',
+                         'g:global variable l:local variable')
+        self.assertEqual(f'g:{a_global!r}',
+                         "g:'global variable'")
+        self.assertEqual(f'g:{a_global} l:{a_local!r}',
+                         "g:global variable l:'local variable'")
+
+        self.assertIn("module 'unittest' from", f'{unittest}')
+
+    def test_shadowed_global(self):
+        a_global = 'really a local'
+        self.assertEqual(f'g:{a_global}', 'g:really a local')
+        self.assertEqual(f'g:{a_global!r}', "g:'really a local'")
+
+        a_local = 'local variable'
+        self.assertEqual(f'g:{a_global} l:{a_local}',
+                         'g:really a local l:local variable')
+        self.assertEqual(f'g:{a_global!r}',
+                         "g:'really a local'")
+        self.assertEqual(f'g:{a_global} l:{a_local!r}',
+                         "g:really a local l:'local variable'")
+
+    def test_call(self):
+        def foo(x):
+            return 'x=' + str(x)
+
+        self.assertEqual(f'{foo(10)}', 'x=10')
+
+    def test_nested_fstrings(self):
+        y = 5
+        self.assertEqual(f'{f"{0}"*3}', '000')
+        self.assertEqual(f'{f"{y}"*3}', '555')
+
+    def test_invalid_string_prefixes(self):
+        self.assertAllRaise(SyntaxError, 'unexpected EOF while parsing',
+                            ["fu''",
+                             "uf''",
+                             "Fu''",
+                             "fU''",
+                             "Uf''",
+                             "uF''",
+                             "ufr''",
+                             "urf''",
+                             "fur''",
+                             "fru''",
+                             "rfu''",
+                             "ruf''",
+                             "FUR''",
+                             "Fur''",
+                             "fb''",
+                             "fB''",
+                             "Fb''",
+                             "FB''",
+                             "bf''",
+                             "bF''",
+                             "Bf''",
+                             "BF''",
+                             ])
+
+    def test_leading_trailing_spaces(self):
+        self.assertEqual(f'{ 3}', '3')
+        self.assertEqual(f'{  3}', '3')
+        self.assertEqual(f'{3 }', '3')
+        self.assertEqual(f'{3  }', '3')
+
+        self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]}}',
+                         'expr={1: 2}')
+        self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]} }',
+                         'expr={1: 2}')
+
+    def test_not_equal(self):
+        # There's a special test for this because there's a special
+        #  case in the f-string parser to look for != as not ending an
+        #  expression. Normally it would, while looking for !s or !r.
+
+        self.assertEqual(f'{3!=4}', 'True')
+        self.assertEqual(f'{3!=4:}', 'True')
+        self.assertEqual(f'{3!=4!s}', 'True')
+        self.assertEqual(f'{3!=4!s:.3}', 'Tru')
+
+    def test_conversions(self):
+        self.assertEqual(f'{3.14:10.10}', '      3.14')
+        self.assertEqual(f'{3.14!s:10.10}', '3.14      ')
+        self.assertEqual(f'{3.14!r:10.10}', '3.14      ')
+        self.assertEqual(f'{3.14!a:10.10}', '3.14      ')
+
+        self.assertEqual(f'{"a"}', 'a')
+        self.assertEqual(f'{"a"!r}', "'a'")
+        self.assertEqual(f'{"a"!a}', "'a'")
+
+        # Not a conversion.
+        self.assertEqual(f'{"a!r"}', "a!r")
+
+        # Not a conversion, but show that ! is allowed in a format spec.
+        self.assertEqual(f'{3.14:!<10.10}', '3.14!!!!!!')
+
+        self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
+                            ["f'{3!g}'",
+                             "f'{3!A}'",
+                             "f'{3!3}'",
+                             "f'{3!G}'",
+                             "f'{3!!}'",
+                             "f'{3!:}'",
+                             "f'{3! s}'",  # no space before conversion char
+                             ])
+
+        self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
+                            ["f'{x!s{y}}'",
+                             "f'{3!ss}'",
+                             "f'{3!ss:}'",
+                             "f'{3!ss:s}'",
+                             ])
+
+    def test_assignment(self):
+        self.assertAllRaise(SyntaxError, 'invalid syntax',
+                            ["f'' = 3",
+                             "f'{0}' = x",
+                             "f'{x}' = x",
+                             ])
+
+    def test_del(self):
+        self.assertAllRaise(SyntaxError, 'invalid syntax',
+                            ["del f''",
+                             "del '' f''",
+                             ])
+
+    def test_mismatched_braces(self):
+        self.assertAllRaise(SyntaxError, "f-string: single '}' is not allowed",
+                            ["f'{{}'",
+                             "f'{{}}}'",
+                             "f'}'",
+                             "f'x}'",
+                             "f'x}x'",
+                             r"f'\u007b}'",
+
+                             # Can't have { or } in a format spec.
+                             "f'{3:}>10}'",
+                             "f'{3:}}>10}'",
+                             ])
+
+        self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
+                            ["f'{3:{{>10}'",
+                             "f'{3'",
+                             "f'{3!'",
+                             "f'{3:'",
+                             "f'{3!s'",
+                             "f'{3!s:'",
+                             "f'{3!s:3'",
+                             "f'x{'",
+                             "f'x{x'",
+                             "f'{x'",
+                             "f'{3:s'",
+                             "f'{{{'",
+                             "f'{{}}{'",
+                             "f'{'",
+                             ])
+
+        # But these are just normal strings.
+        self.assertEqual(f'{"{"}', '{')
+        self.assertEqual(f'{"}"}', '}')
+        self.assertEqual(f'{3:{"}"}>10}', '}}}}}}}}}3')
+        self.assertEqual(f'{2:{"{"}>10}', '{{{{{{{{{2')
+
+    def test_if_conditional(self):
+        # There's special logic in compile.c to test if the
+        #  conditional for an if (and while) are constants. Exercise
+        #  that code.
+
+        def test_fstring(x, expected):
+            flag = 0
+            if f'{x}':
+                flag = 1
+            else:
+                flag = 2
+            self.assertEqual(flag, expected)
+
+        def test_concat_empty(x, expected):
+            flag = 0
+            if '' f'{x}':
+                flag = 1
+            else:
+                flag = 2
+            self.assertEqual(flag, expected)
+
+        def test_concat_non_empty(x, expected):
+            flag = 0
+            if ' ' f'{x}':
+                flag = 1
+            else:
+                flag = 2
+            self.assertEqual(flag, expected)
+
+        test_fstring('', 2)
+        test_fstring(' ', 1)
+
+        test_concat_empty('', 2)
+        test_concat_empty(' ', 1)
+
+        test_concat_non_empty('', 1)
+        test_concat_non_empty(' ', 1)
+
+    def test_empty_format_specifier(self):
+        x = 'test'
+        self.assertEqual(f'{x}', 'test')
+        self.assertEqual(f'{x:}', 'test')
+        self.assertEqual(f'{x!s:}', 'test')
+        self.assertEqual(f'{x!r:}', "'test'")
+
+    def test_str_format_differences(self):
+        d = {'a': 'string',
+             0: 'integer',
+             }
+        a = 0
+        self.assertEqual(f'{d[0]}', 'integer')
+        self.assertEqual(f'{d["a"]}', 'string')
+        self.assertEqual(f'{d[a]}', 'integer')
+        self.assertEqual('{d[a]}'.format(d=d), 'string')
+        self.assertEqual('{d[0]}'.format(d=d), 'integer')
+
+    def test_invalid_expressions(self):
+        self.assertAllRaise(SyntaxError, 'invalid syntax',
+                            [r"f'{a[4)}'",
+                             r"f'{a(4]}'",
+                            ])
+
+    def test_errors(self):
+        # see issue 26287
+        self.assertAllRaise(TypeError, 'unsupported',
+                            [r"f'{(lambda: 0):x}'",
+                             r"f'{(0,):x}'",
+                             ])
+        self.assertAllRaise(ValueError, 'Unknown format code',
+                            [r"f'{1000:j}'",
+                             r"f'{1000:j}'",
+                            ])
+
+    def test_loop(self):
+        for i in range(1000):
+            self.assertEqual(f'i:{i}', 'i:' + str(i))
+
+    def test_dict(self):
+        d = {'"': 'dquote',
+             "'": 'squote',
+             'foo': 'bar',
+             }
+        self.assertEqual(f'''{d["'"]}''', 'squote')
+        self.assertEqual(f"""{d['"']}""", 'dquote')
+
+        self.assertEqual(f'{d["foo"]}', 'bar')
+        self.assertEqual(f"{d['foo']}", 'bar')
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -196,6 +196,11 @@
                default=False,
                requires=[("objspace.usemodules.cpyext", False)]),
 
+    BoolOption("fstrings",
+               "if you are really convinced that f-strings are a security "
+               "issue, you can disable them here",
+               default=True),
+
     OptionDescription("std", "Standard Object Space Options", [
         BoolOption("withtproxy", "support transparent proxies",
                    default=True),
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -758,6 +758,14 @@
 def _compute_CALL_METHOD(arg):
     return -_num_args(arg) - 1
 
+def _compute_FORMAT_VALUE(arg):
+    if (arg & consts.FVS_MASK) == consts.FVS_HAVE_SPEC:
+        return -1
+    return 0
+
+def _compute_BUILD_STRING(arg):
+    return 1 - arg
+
 
 _stack_effect_computers = {}
 for name, func in globals().items():
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -1733,6 +1733,10 @@
             return Num.from_object(space, w_node)
         if space.isinstance_w(w_node, get(space).w_Str):
             return Str.from_object(space, w_node)
+        if space.isinstance_w(w_node, get(space).w_FormattedValue):
+            return FormattedValue.from_object(space, w_node)
+        if space.isinstance_w(w_node, get(space).w_JoinedStr):
+            return JoinedStr.from_object(space, w_node)
         if space.isinstance_w(w_node, get(space).w_Bytes):
             return Bytes.from_object(space, w_node)
         if space.isinstance_w(w_node, get(space).w_NameConstant):
@@ -2639,6 +2643,100 @@
 State.ast_type('Str', 'expr', ['s'])
 
 
class FormattedValue(expr):
    """AST node for one '{value!conversion:format_spec}' part of an
    f-string.

    'conversion' is the ord() of the conversion character ('s', 'r',
    'a') or -1 when none was given; 'format_spec' is an optional expr.
    Follows the generated to_object/from_object pattern of the other
    nodes in this file (presumably produced from Python.asdl -- the
    matching asdl entry is added in the same changeset).
    """

    def __init__(self, value, conversion, format_spec, lineno, col_offset):
        self.value = value
        self.conversion = conversion
        self.format_spec = format_spec
        expr.__init__(self, lineno, col_offset)

    def walkabout(self, visitor):
        visitor.visit_FormattedValue(self)

    def mutate_over(self, visitor):
        self.value = self.value.mutate_over(visitor)
        if self.format_spec:
            self.format_spec = self.format_spec.mutate_over(visitor)
        return visitor.visit_FormattedValue(self)

    def to_object(self, space):
        # Build the app-level _ast.FormattedValue object.
        w_node = space.call_function(get(space).w_FormattedValue)
        w_value = self.value.to_object(space)  # expr
        space.setattr(w_node, space.wrap('value'), w_value)
        w_conversion = space.wrap(self.conversion)  # int
        space.setattr(w_node, space.wrap('conversion'), w_conversion)
        w_format_spec = self.format_spec.to_object(space) if self.format_spec is not None else space.w_None  # expr
        space.setattr(w_node, space.wrap('format_spec'), w_format_spec)
        w_lineno = space.wrap(self.lineno)  # int
        space.setattr(w_node, space.wrap('lineno'), w_lineno)
        w_col_offset = space.wrap(self.col_offset)  # int
        space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
        return w_node

    @staticmethod
    def from_object(space, w_node):
        # Rebuild the interp-level node from an app-level _ast object.
        w_value = get_field(space, w_node, 'value', False)
        w_conversion = get_field(space, w_node, 'conversion', True)
        w_format_spec = get_field(space, w_node, 'format_spec', True)
        w_lineno = get_field(space, w_node, 'lineno', False)
        w_col_offset = get_field(space, w_node, 'col_offset', False)
        _value = expr.from_object(space, w_value)
        if _value is None:
            raise_required_value(space, w_node, 'value')
        _conversion = space.int_w(w_conversion)
        _format_spec = expr.from_object(space, w_format_spec)
        _lineno = space.int_w(w_lineno)
        _col_offset = space.int_w(w_col_offset)
        return FormattedValue(_value, _conversion, _format_spec, _lineno, _col_offset)

State.ast_type('FormattedValue', 'expr', ['value', 'conversion', 'format_spec'])
+
+
class JoinedStr(expr):
    """AST node for a whole f-string: 'values' is the list of Str and
    FormattedValue pieces to be concatenated."""

    def __init__(self, values, lineno, col_offset):
        self.values = values
        expr.__init__(self, lineno, col_offset)

    def walkabout(self, visitor):
        visitor.visit_JoinedStr(self)

    def mutate_over(self, visitor):
        if self.values:
            for i in range(len(self.values)):
                if self.values[i] is not None:
                    self.values[i] = self.values[i].mutate_over(visitor)
        return visitor.visit_JoinedStr(self)

    def to_object(self, space):
        # Build the app-level _ast.JoinedStr object.
        w_node = space.call_function(get(space).w_JoinedStr)
        if self.values is None:
            values_w = []
        else:
            values_w = [node.to_object(space) for node in self.values] # expr
        w_values = space.newlist(values_w)
        space.setattr(w_node, space.wrap('values'), w_values)
        w_lineno = space.wrap(self.lineno)  # int
        space.setattr(w_node, space.wrap('lineno'), w_lineno)
        w_col_offset = space.wrap(self.col_offset)  # int
        space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
        return w_node

    @staticmethod
    def from_object(space, w_node):
        # Rebuild the interp-level node from an app-level _ast object.
        w_values = get_field(space, w_node, 'values', False)
        w_lineno = get_field(space, w_node, 'lineno', False)
        w_col_offset = get_field(space, w_node, 'col_offset', False)
        values_w = space.unpackiterable(w_values)
        _values = [expr.from_object(space, w_item) for w_item in values_w]
        _lineno = space.int_w(w_lineno)
        _col_offset = space.int_w(w_col_offset)
        return JoinedStr(_values, _lineno, _col_offset)

State.ast_type('JoinedStr', 'expr', ['values'])
+
+
 class Bytes(expr):
 
     def __init__(self, s, lineno, col_offset):
@@ -4022,6 +4120,10 @@
         return self.default_visitor(node)
     def visit_Str(self, node):
         return self.default_visitor(node)
    def visit_FormattedValue(self, node):
        # Fall back to the catch-all handler, like the other node types.
        return self.default_visitor(node)
    def visit_JoinedStr(self, node):
        # Fall back to the catch-all handler, like the other node types.
        return self.default_visitor(node)
     def visit_Bytes(self, node):
         return self.default_visitor(node)
     def visit_NameConstant(self, node):
@@ -4251,6 +4353,14 @@
     def visit_Str(self, node):
         pass
 
    def visit_FormattedValue(self, node):
        # Walk the value expression and the optional format spec.
        node.value.walkabout(self)
        if node.format_spec:
            node.format_spec.walkabout(self)
+
    def visit_JoinedStr(self, node):
        # Walk every piece of the f-string.
        self.visit_sequence(node.values)
+
     def visit_Bytes(self, node):
         pass
 
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -1,15 +1,15 @@
 from pypy.interpreter.astcompiler import ast, consts, misc
 from pypy.interpreter.astcompiler import asthelpers # Side effects
+from pypy.interpreter.astcompiler import fstring
 from pypy.interpreter import error
 from pypy.interpreter.pyparser.pygram import syms, tokens
 from pypy.interpreter.pyparser.error import SyntaxError
-from pypy.interpreter.pyparser import parsestring
 from rpython.rlib.objectmodel import always_inline, we_are_translated
 
 
-def ast_from_node(space, node, compile_info):
+def ast_from_node(space, node, compile_info, recursive_parser=None):
     """Turn a parse tree, node, to AST."""
-    ast = ASTBuilder(space, node, compile_info).build_ast()
+    ast = ASTBuilder(space, node, compile_info, recursive_parser).build_ast()
     #
     # When we are not translated, we send this ast to validate_ast.
     # The goal is to check that validate_ast doesn't crash on valid
@@ -54,10 +54,11 @@
 
 class ASTBuilder(object):
 
-    def __init__(self, space, n, compile_info):
+    def __init__(self, space, n, compile_info, recursive_parser=None):
         self.space = space
         self.compile_info = compile_info
         self.root_node = n
+        self.recursive_parser = recursive_parser
 
     def build_ast(self):
         """Convert an top level parse tree node into an AST mod."""
@@ -1189,7 +1190,7 @@
             value = self.handle_expr(node.get_child(i+2))
             i += 3
         return (i,key,value)
-    
+
     def handle_atom(self, atom_node):
         first_child = atom_node.get_child(0)
         first_child_type = first_child.type
@@ -1207,39 +1208,10 @@
                                 first_child.get_column())
             return ast.NameConstant(w_singleton, first_child.get_lineno(),
                                 first_child.get_column())
+        #
         elif first_child_type == tokens.STRING:
-            space = self.space
-            encoding = self.compile_info.encoding
-            try:
-                sub_strings_w = [
-                    parsestring.parsestr(
-                            space, encoding, atom_node.get_child(i).get_value())
-                        for i in range(atom_node.num_children())]
-            except error.OperationError as e:
-                if e.match(space, space.w_UnicodeError):
-                    kind = 'unicode error'
-                elif e.match(space, space.w_ValueError):
-                    kind = 'value error'
-                else:
-                    raise
-                # Unicode/ValueError in literal: turn into SyntaxError
-                e.normalize_exception(space)
-                errmsg = space.str_w(space.str(e.get_w_value(space)))
-                raise self.error('(%s) %s' % (kind, errmsg), atom_node)
-            # Implement implicit string concatenation.
-            w_string = sub_strings_w[0]
-            for i in range(1, len(sub_strings_w)):
-                try:
-                    w_string = space.add(w_string, sub_strings_w[i])
-                except error.OperationError as e:
-                    if not e.match(space, space.w_TypeError):
-                        raise
-                    self.error("cannot mix bytes and nonbytes literals",
-                              atom_node)
-                # UnicodeError in literal: turn into SyntaxError
-            strdata = space.isinstance_w(w_string, space.w_unicode)
-            node = ast.Str if strdata else ast.Bytes
-            return node(w_string, atom_node.get_lineno(), atom_node.get_column())
+            return fstring.string_parse_literal(self, atom_node)
+        #
         elif first_child_type == tokens.NUMBER:
             num_value = self.parse_number(first_child.get_value())
             return ast.Num(num_value, atom_node.get_lineno(), atom_node.get_column())
diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py
--- a/pypy/interpreter/astcompiler/codegen.py
+++ b/pypy/interpreter/astcompiler/codegen.py
@@ -1502,6 +1502,23 @@
             sub.value.walkabout(self)
         self._compile_slice(sub.slice, sub.ctx)
 
+    def visit_JoinedStr(self, joinedstr):
+        self.update_position(joinedstr.lineno)
+        for node in joinedstr.values:
+            node.walkabout(self)
+        self.emit_op_arg(ops.BUILD_STRING, len(joinedstr.values))
+
+    def visit_FormattedValue(self, fmt):
+        fmt.value.walkabout(self)
+        arg = 0
+        if fmt.conversion == ord('s'): arg = consts.FVC_STR
+        if fmt.conversion == ord('r'): arg = consts.FVC_REPR
+        if fmt.conversion == ord('a'): arg = consts.FVC_ASCII
+        if fmt.format_spec is not None:
+            arg |= consts.FVS_HAVE_SPEC
+            fmt.format_spec.walkabout(self)
+        self.emit_op_arg(ops.FORMAT_VALUE, arg)
+
 
 class TopLevelCodeGenerator(PythonCodeGenerator):
 
diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py
--- a/pypy/interpreter/astcompiler/consts.py
+++ b/pypy/interpreter/astcompiler/consts.py
@@ -33,3 +33,12 @@
 PyCF_IGNORE_COOKIE = 0x0800
 PyCF_ACCEPT_NULL_BYTES = 0x10000000   # PyPy only, for compile()
 PyCF_FOUND_ENCODING = 0x20000000      # PyPy only, for pytokenizer
+
+# Masks and values used by FORMAT_VALUE opcode
+FVC_MASK      = 0x3
+FVC_NONE      = 0x0
+FVC_STR       = 0x1
+FVC_REPR      = 0x2
+FVC_ASCII     = 0x3
+FVS_MASK      = 0x4
+FVS_HAVE_SPEC = 0x4
diff --git a/pypy/interpreter/astcompiler/fstring.py b/pypy/interpreter/astcompiler/fstring.py
new file mode 100644
--- /dev/null
+++ b/pypy/interpreter/astcompiler/fstring.py
@@ -0,0 +1,377 @@
+from pypy.interpreter.astcompiler import ast, consts
+from pypy.interpreter.pyparser import parsestring
+from pypy.interpreter import error
+from pypy.interpreter import unicodehelper
+from rpython.rlib.rstring import UnicodeBuilder
+
+
def add_constant_string(astbuilder, joined_pieces, w_string, atom_node):
    """Append 'w_string' to 'joined_pieces' as a Str or Bytes node.

    Implements implicit string concatenation: when the previous piece
    is a literal of the same kind, the two are merged into one node.
    """
    space = astbuilder.space
    is_unicode = space.isinstance_w(w_string, space.w_unicode)
    if len(joined_pieces) > 0:
        prev = joined_pieces[-1]
        if is_unicode:
            if isinstance(prev, ast.Str):
                w_string = space.add(prev.s, w_string)
                joined_pieces.pop()
        else:
            if isinstance(prev, ast.Bytes):
                w_string = space.add(prev.s, w_string)
                joined_pieces.pop()
    if is_unicode:
        node = ast.Str
    else:
        node = ast.Bytes
    joined_pieces.append(node(w_string, atom_node.get_lineno(),
                              atom_node.get_column()))
+
def f_constant_string(astbuilder, joined_pieces, u, atom_node):
    # Wrap the unicode literal 'u' and append it as a constant piece.
    w_u = astbuilder.space.newunicode(u)
    add_constant_string(astbuilder, joined_pieces, w_u, atom_node)
+
def f_string_compile(astbuilder, source, atom_node):
    """Compile the text of one '{...}' part to an ast.Expression."""
    # Note: a f-string is kept as a single literal up to here.
    # At this point only, we recursively call the AST compiler
    # on all the '{expr}' parts.  The 'expr' part is not parsed
    # or even tokenized together with the rest of the source code!
    from pypy.interpreter.pyparser import pyparse
    from pypy.interpreter.astcompiler.astbuilder import ast_from_node

    # complain if 'source' is only whitespace or an empty string
    for c in source:
        if c not in ' \t\n\r\v\f':
            break
    else:
        astbuilder.error("f-string: empty expression not allowed", atom_node)

    if astbuilder.recursive_parser is None:
        astbuilder.error("internal error: parser not available for parsing "
                   "the expressions inside the f-string", atom_node)
    # Wrap in parentheses so the expression is a valid "eval" input
    # even if it contains newlines.
    source = '(%s)' % source.encode('utf-8')

    info = pyparse.CompileInfo("<fstring>", "eval",
                               consts.PyCF_SOURCE_IS_UTF8 |
                               consts.PyCF_IGNORE_COOKIE,
                               optimize=astbuilder.compile_info.optimize)
    parser = astbuilder.recursive_parser
    parse_tree = parser.parse_source(source, info)
    return ast_from_node(astbuilder.space, parse_tree, info,
                         recursive_parser=parser)
+
+
def unexpected_end_of_string(astbuilder, atom_node):
    # Common error for an f-string that ends in the middle of an
    # expression, conversion character, or format spec.
    astbuilder.error("f-string: expecting '}'", atom_node)
+
+
def fstring_find_expr(astbuilder, fstr, atom_node, rec):
    # Parse the f-string at fstr.current_index.  We know it starts an
    # expression (so it must be at '{'). Returns the FormattedValue node,
    # which includes the expression, conversion character, and
    # format_spec expression.
    conversion = -1      # the conversion char.  -1 if not specified.
    format_spec = None

    # 0 if we're not in a string, else the quote char we're trying to
    # match (single or double quote).
    quote_char = 0

    # If we're inside a string, 1=normal, 3=triple-quoted.
    string_type = 0

    # Keep track of nesting level for braces/parens/brackets in
    # expressions.
    nested_depth = 0

    # Can only nest one level deep.
    if rec >= 2:
        astbuilder.error("f-string: expressions nested too deeply", atom_node)

    # The first char must be a left brace, or we wouldn't have gotten
    # here. Skip over it.
    u = fstr.unparsed
    i = fstr.current_index
    assert u[i] == u'{'
    i += 1

    # Scan until the end of the expression part, skipping over nested
    # strings and bracketed groups.
    expr_start = i
    while i < len(u):

        # Loop invariants.
        assert nested_depth >= 0
        if quote_char:
            assert string_type == 1 or string_type == 3
        else:
            assert string_type == 0

        ch = u[i]
        # Nowhere inside an expression is a backslash allowed.
        if ch == u'\\':
            # Error: can't include a backslash character, inside
            # parens or strings or not.
            astbuilder.error("f-string expression part "
                             "cannot include a backslash", atom_node)

        if quote_char:
            # We're inside a string. See if we're at the end.
            # <a long comment goes here about how we're duplicating
            # some existing logic>
            if ord(ch) == quote_char:
                # Does this match the string_type (single or triple
                # quoted)?
                if string_type == 3:
                    if i + 2 < len(u) and u[i + 1] == u[i + 2] == ch:
                        # We're at the end of a triple quoted string.
                        i += 3
                        string_type = 0
                        quote_char = 0
                        continue
                else:
                    # We're at the end of a normal string.
                    i += 1
                    string_type = 0
                    quote_char = 0
                    continue
        elif ch == u"'" or ch == u'"':
            # Is this a triple quoted string?
            if i + 2 < len(u) and u[i + 1] == u[i + 2] == ch:
                string_type = 3
                i += 2
            else:
                # Start of a normal string.
                string_type = 1
            # Start looking for the end of the string.
            quote_char = ord(ch)
        elif ch in u"[{(":
            nested_depth += 1
        elif nested_depth != 0 and ch in u"]})":
            nested_depth -= 1
        elif ch == u'#':
            # Error: can't include a comment character, inside parens
            # or not.
            astbuilder.error("f-string expression part cannot include '#'",
                             atom_node)
        elif nested_depth == 0 and ch in u"!:}":
            # First, test for the special case of "!=". Since '=' is
            # not an allowed conversion character, nothing is lost in
            # this test.
            if ch == '!' and i + 1 < len(u) and u[i+1] == u'=':
                # This isn't a conversion character, just continue.
                i += 1
                continue
            # Normal way out of this loop.
            break
        #else:
        #   This isn't a conversion character, just continue.
        i += 1

    # If we leave this loop in a string or with mismatched parens, we
    # don't care. We'll get a syntax error when compiling the
    # expression. But, we can produce a better error message, so
    # let's just do that.
    if quote_char:
        astbuilder.error("f-string: unterminated string", atom_node)

    if nested_depth:
        astbuilder.error("f-string: mismatched '(', '{' or '['", atom_node)

    if i >= len(u):
        unexpected_end_of_string(astbuilder, atom_node)

    # Compile the expression as soon as possible, so we show errors
    # related to the expression before errors related to the
    # conversion or format_spec.
    expr = f_string_compile(astbuilder, u[expr_start:i], atom_node)
    assert isinstance(expr, ast.Expression)

    # Check for a conversion char, if present.
    if u[i] == u'!':
        i += 1
        if i >= len(u):
            unexpected_end_of_string(astbuilder, atom_node)

        conversion = ord(u[i])
        i += 1
        if conversion not in (ord('s'), ord('r'), ord('a')):
            astbuilder.error("f-string: invalid conversion character: "
                             "expected 's', 'r', or 'a'", atom_node)

    # Check for the format spec, if present.  The spec is parsed as a
    # nested f-string (rec + 1), then folded to a single AST node.
    if i >= len(u):
        unexpected_end_of_string(astbuilder, atom_node)
    if u[i] == u':':
        i += 1
        if i >= len(u):
            unexpected_end_of_string(astbuilder, atom_node)
        fstr.current_index = i
        subpieces = []
        parse_f_string(astbuilder, subpieces, fstr, atom_node, rec + 1)
        format_spec = f_string_to_ast_node(astbuilder, subpieces, atom_node)
        i = fstr.current_index

    if i >= len(u) or u[i] != u'}':
        unexpected_end_of_string(astbuilder, atom_node)

    # We're at a right brace. Consume it.
    i += 1
    fstr.current_index = i

    # And now create the FormattedValue node that represents this
    # entire expression with the conversion and format spec.
    return ast.FormattedValue(expr.body, conversion, format_spec,
                              atom_node.get_lineno(),
                              atom_node.get_column())
+
+
def fstring_find_literal(astbuilder, fstr, atom_node, rec):
    # Return the next literal part.  Updates the current index inside 'fstr'.
    # Differs from CPython: this version handles double-braces on its own.
    u = fstr.unparsed
    literal_start = fstr.current_index
    in_named_escape = False

    # Get any literal string. It ends when we hit an un-doubled left
    # brace (which isn't part of a unicode name escape such as
    # "\N{EULER CONSTANT}"), or the end of the string.
    i = literal_start
    builder = UnicodeBuilder()
    while i < len(u):
        ch = u[i]
        if (not in_named_escape and ch == u'{' and i - literal_start >= 2
                and u[i - 2] == u'\\' and u[i - 1] == u'N'):
            in_named_escape = True
        elif in_named_escape and ch == u'}':
            in_named_escape = False
        elif ch == u'{' or ch == u'}':
            # Check for doubled braces, but only at the top level. If
            # we checked at every level, then f'{0:{3}}' would fail
            # with the two closing braces.
            if rec == 0 and i + 1 < len(u) and u[i + 1] == ch:
                i += 1   # skip over the second brace
            elif rec == 0 and ch == u'}':
                # Where a single '{' is the start of a new expression, a
                # single '}' is not allowed.
                astbuilder.error("f-string: single '}' is not allowed",
                                 atom_node)
            else:
                # We're either at a '{', which means we're starting another
                # expression; or a '}', which means we're at the end of this
                # f-string (for a nested format_spec).
                break
        builder.append(ch)
        i += 1

    fstr.current_index = i
    literal = builder.build()
    # Backslash escapes were left untouched while scanning (so that the
    # \N{...} brace matching above works); decode them only now, and
    # only for non-raw f-strings.
    if not fstr.raw_mode and u'\\' in literal:
        # xxx messy
        space = astbuilder.space
        literal = literal.encode('utf-8')
        literal = parsestring.decode_unicode_utf8(space, literal, 0,
                                                  len(literal))
        literal = unicodehelper.decode_unicode_escape(space, literal)
    return literal
+
+
def fstring_find_literal_and_expr(astbuilder, fstr, atom_node, rec):
    """Return (literal, expr): the next literal chunk and, when one
    follows it, the FormattedValue node for the next '{...}' part
    (None otherwise).  Advances fstr.current_index."""
    literal = fstring_find_literal(astbuilder, fstr, atom_node, rec)

    u = fstr.unparsed
    i = fstr.current_index
    expr = None
    if i < len(u) and u[i] != u'}':
        # Not at the end of the string nor at the end of a nested
        # f-string: an expression must start here, on a '{'.
        assert u[i] == u'{'
        expr = fstring_find_expr(astbuilder, fstr, atom_node, rec)
    return literal, expr
+
+
def parse_f_string(astbuilder, joined_pieces, fstr, atom_node, rec=0):
    """Parse the whole f-string 'fstr', appending Str/FormattedValue
    pieces to 'joined_pieces'.

    In our case, parse_f_string() and fstring_find_literal_and_expr()
    could be merged into a single function with a clearer logic.  It's
    done this way to follow CPython's source code more closely.
    """
    space = astbuilder.space
    if not space.config.objspace.fstrings:
        # Bug fix: this previously called the bare name 'oefmt', which
        # is never imported in this module and would crash with a
        # NameError instead of raising the intended SystemError.  Use
        # the helper from the already-imported 'error' module.
        raise error.oefmt(space.w_SystemError,
                    "f-strings have been disabled in this version of pypy "
                    "with the translation option --no-objspace-fstrings.  "
                    "The PyPy team (and CPython) thinks f-strings don't "
                    "add any security risks, but we leave it to you to "
                    "convince whoever translated this pypy that it is "
                    "really the case")

    while True:
        literal, expr = fstring_find_literal_and_expr(astbuilder, fstr,
                                                      atom_node, rec)

        # add the literal part
        f_constant_string(astbuilder, joined_pieces, literal, atom_node)

        if expr is None:
            break         # We're done with this f-string.

        joined_pieces.append(expr)

    # If recurse_lvl is zero, then we must be at the end of the
    # string. Otherwise, we must be at a right brace.
    if rec == 0 and fstr.current_index < len(fstr.unparsed) - 1:
        astbuilder.error("f-string: unexpected end of string", atom_node)

    if rec != 0 and (fstr.current_index >= len(fstr.unparsed) or
                     fstr.unparsed[fstr.current_index] != u'}'):
        astbuilder.error("f-string: expecting '}'", atom_node)
+
+
def f_string_to_ast_node(astbuilder, joined_pieces, atom_node):
    """Fold the list of pieces into a single AST node: the piece itself
    when only one non-empty piece remains, else a JoinedStr."""
    values = []
    for node in joined_pieces:
        if isinstance(node, ast.Str) and not node.s:
            continue    # drop empty string literals
        values.append(node)
    if len(values) == 1:
        return values[0]
    if len(values) > 1:
        return ast.JoinedStr(values, atom_node.get_lineno(),
                             atom_node.get_column())
    # All the pieces were empty strings: keep one of them.
    assert len(joined_pieces) > 0
    return joined_pieces[0]
+
+
def string_parse_literal(astbuilder, atom_node):
    """Build the AST node for one or more adjacent string literals
    (plain, bytes, or f-strings), with implicit concatenation.

    Entry point called from astbuilder.handle_atom() for STRING tokens.
    """
    space = astbuilder.space
    encoding = astbuilder.compile_info.encoding
    joined_pieces = []
    try:
        for i in range(atom_node.num_children()):
            w_next = parsestring.parsestr(
                    space, encoding, atom_node.get_child(i).get_value())
            if not isinstance(w_next, parsestring.W_FString):
                add_constant_string(astbuilder, joined_pieces, w_next,
                                    atom_node)
            else:
                parse_f_string(astbuilder, joined_pieces, w_next, atom_node)

    except error.OperationError as e:
        if e.match(space, space.w_UnicodeError):
            kind = 'unicode error'
        elif e.match(space, space.w_ValueError):
            kind = 'value error'
        else:
            raise
        # Unicode/ValueError in literal: turn into SyntaxError
        e.normalize_exception(space)
        errmsg = space.str_w(space.str(e.get_w_value(space)))
        raise astbuilder.error('(%s) %s' % (kind, errmsg), atom_node)

    if len(joined_pieces) == 1:   # <= the common path
        return joined_pieces[0]   # ast.Str, Bytes or FormattedValue

    # with more than one piece, it is a combination of Str and
    # FormattedValue pieces---if there is a Bytes, then we got
    # an invalid mixture of bytes and unicode literals
    for node in joined_pieces:
        if isinstance(node, ast.Bytes):
            astbuilder.error("cannot mix bytes and nonbytes literals",
                             atom_node)
    return f_string_to_ast_node(astbuilder, joined_pieces, atom_node)
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -12,7 +12,7 @@
     p = pyparse.PythonParser(space)
     info = pyparse.CompileInfo("<test>", mode)
     cst = p.parse_source(expr, info)
-    ast = astbuilder.ast_from_node(space, cst, info)
+    ast = astbuilder.ast_from_node(space, cst, info, recursive_parser=p)
     return codegen.compile_ast(space, ast, info)
 
 def generate_function_code(expr, space):
@@ -20,7 +20,7 @@
     p = pyparse.PythonParser(space)
     info = pyparse.CompileInfo("<test>", 'exec')
     cst = p.parse_source(expr, info)
-    ast = astbuilder.ast_from_node(space, cst, info)
+    ast = astbuilder.ast_from_node(space, cst, info, recursive_parser=p)
     function_ast = optimize.optimize_ast(space, ast.body[0], info)
     function_ast = ast.body[0]
     assert isinstance(function_ast, FunctionDef)
@@ -1162,6 +1162,38 @@
         """
         yield self.st, source, "f()", 43
 
    def test_fstring(self):
        # Basic f-string behaviour: substitution, escaped braces, raw
        # mode, !s/!r/!a conversions, format specs, and nesting.
        yield self.st, """x = 42; z = f'ab{x}cd'""", 'z', 'ab42cd'
        yield self.st, """z = f'{{'""", 'z', '{'
        yield self.st, """z = f'}}'""", 'z', '}'
        yield self.st, """z = f'x{{y'""", 'z', 'x{y'
        yield self.st, """z = f'x}}y'""", 'z', 'x}y'
        yield self.st, """z = f'{{{4*10}}}'""", 'z', '{40}'
        yield self.st, r"""z = fr'x={4*10}\n'""", 'z', 'x=40\\n'

        yield self.st, """x = 'hi'; z = f'{x}'""", 'z', 'hi'
        yield self.st, """x = 'hi'; z = f'{x!s}'""", 'z', 'hi'
        yield self.st, """x = 'hi'; z = f'{x!r}'""", 'z', "'hi'"
        yield self.st, """x = 'hi'; z = f'{x!a}'""", 'z', "'hi'"

        yield self.st, """x = 'hi'; z = f'''{\nx}'''""", 'z', 'hi'

        yield self.st, """x = 'hi'; z = f'{x:5}'""", 'z', 'hi   '
        yield self.st, """x = 42;   z = f'{x:5}'""", 'z', '   42'
        yield self.st, """x = 2; z = f'{5:{x:+1}0}'""", 'z', (' ' * 18 + '+5')

        yield self.st, """z=f'{"}"}'""", 'z', '}'

        yield self.st, """z=f'{f"{0}"*3}'""", 'z', '000'
+
    def test_fstring_error(self):
        # Malformed f-strings must be rejected at compile time.
        raises(SyntaxError, self.run, "f'{}'")
        raises(SyntaxError, self.run, "f'{   \t   }'")
        raises(SyntaxError, self.run, "f'{5#}'")
        raises(SyntaxError, self.run, "f'{5)#}'")
        raises(SyntaxError, self.run, "f'''{5)\n#}'''")
        raises(SyntaxError, self.run, "f'\\x'")
+
 
 class AppTestCompiler:
 
@@ -1384,3 +1416,9 @@
         code, blocks = generate_function_code(source, self.space)
         # there is a stack computation error
         assert blocks[0].instructions[3].arg == 0
+
    def test_fstring(self):
        # Only checks that code generation does not crash on f-strings.
        source = """def f(x):
            return f'ab{x}cd'
        """
        code, blocks = generate_function_code(source, self.space)
diff --git a/pypy/interpreter/astcompiler/tools/Python.asdl b/pypy/interpreter/astcompiler/tools/Python.asdl
--- a/pypy/interpreter/astcompiler/tools/Python.asdl
+++ b/pypy/interpreter/astcompiler/tools/Python.asdl
@@ -70,6 +70,8 @@
          | Call(expr func, expr* args, keyword* keywords)
          | Num(object n) -- a number as a PyObject.
          | Str(string s) -- need to specify raw, unicode, etc?
+         | FormattedValue(expr value, int? conversion, expr? format_spec)
+         | JoinedStr(expr* values)
          | Bytes(bytes s)
          | NameConstant(singleton value)
          | Ellipsis
diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py
--- a/pypy/interpreter/astcompiler/validate.py
+++ b/pypy/interpreter/astcompiler/validate.py
@@ -448,3 +448,11 @@
             node.value is not space.w_True and
             node.value is not space.w_False):
             raise ValidationError("singleton must be True, False, or None")
+
    def visit_JoinedStr(self, node):
        # Every piece of an f-string must be a valid expression.
        self._validate_exprs(node.values)
+
    def visit_FormattedValue(self, node):
        self._validate_expr(node.value)
        # The format spec is optional; when present it is itself an
        # expression.
        if node.format_spec:
            self._validate_expr(node.format_spec)
diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py
--- a/pypy/interpreter/pycompiler.py
+++ b/pypy/interpreter/pycompiler.py
@@ -150,7 +150,8 @@
         space = self.space
         try:
             parse_tree = self.parser.parse_source(source, info)
-            mod = astbuilder.ast_from_node(space, parse_tree, info)
+            mod = astbuilder.ast_from_node(space, parse_tree, info,
+                                           recursive_parser=self.parser)
         except parseerror.TabError as e:
             raise OperationError(space.w_TabError,
                                  e.wrap_info(space))
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -434,6 +434,10 @@
                 self.GET_AITER(oparg, next_instr)
             elif opcode == opcodedesc.GET_ANEXT.index:
                 self.GET_ANEXT(oparg, next_instr)
+            elif opcode == opcodedesc.FORMAT_VALUE.index:
+                self.FORMAT_VALUE(oparg, next_instr)
+            elif opcode == opcodedesc.BUILD_STRING.index:
+                self.BUILD_STRING(oparg, next_instr)
             else:
                 self.MISSING_OPCODE(oparg, next_instr)
 
@@ -1607,6 +1611,39 @@
                         "from __anext__: %T", w_next_iter)
         self.pushvalue(w_awaitable)
 
+    def FORMAT_VALUE(self, oparg, next_instr):
+        from pypy.interpreter.astcompiler import consts
+        space = self.space
+        #
+        if (oparg & consts.FVS_MASK) == consts.FVS_HAVE_SPEC:
+            w_spec = self.popvalue()
+        else:
+            w_spec = space.newunicode(u'')
+        w_value = self.popvalue()
+        #
+        conversion = oparg & consts.FVC_MASK
+        if conversion == consts.FVC_STR:
+            w_value = space.str(w_value)
+        elif conversion == consts.FVC_REPR:
+            w_value = space.repr(w_value)
+        elif conversion == consts.FVC_ASCII:
+            from pypy.objspace.std.unicodeobject import ascii_from_object
+            w_value = ascii_from_object(space, w_value)
+        #
+        w_res = space.format(w_value, w_spec)
+        self.pushvalue(w_res)
+
+    @jit.unroll_safe
+    def BUILD_STRING(self, itemcount, next_instr):
+        space = self.space
+        lst = []
+        for i in range(itemcount-1, -1, -1):
+            w_item = self.peekvalue(i)
+            lst.append(space.unicode_w(w_item))
+        self.dropvalues(itemcount)
+        w_res = space.newunicode(u''.join(lst))
+        self.pushvalue(w_res)
+
 ### ____________________________________________________________ ###
 
 class ExitFrame(Exception):
diff --git a/pypy/interpreter/pyparser/dfa_generated.py b/pypy/interpreter/pyparser/dfa_generated.py
--- a/pypy/interpreter/pyparser/dfa_generated.py
+++ b/pypy/interpreter/pyparser/dfa_generated.py
@@ -23,7 +23,7 @@
      '8': 6, '9': 6, ':': 15, ';': 15,
      '<': 10, '=': 14, '>': 9, '@': 14,
      'A': 1, 'B': 2, 'C': 1, 'D': 1,
-     'E': 1, 'F': 1, 'G': 1, 'H': 1,
+     'E': 1, 'F': 2, 'G': 1, 'H': 1,
      'I': 1, 'J': 1, 'K': 1, 'L': 1,
      'M': 1, 'N': 1, 'O': 1, 'P': 1,
      'Q': 1, 'R': 3, 'S': 1, 'T': 1,
@@ -31,7 +31,7 @@
      'Y': 1, 'Z': 1, '[': 15, '\\': 19,
      ']': 15, '^': 14, '_': 1, '`': 15,
      'a': 1, 'b': 2, 'c': 1, 'd': 1,
-     'e': 1, 'f': 1, 'g': 1, 'h': 1,
+     'e': 1, 'f': 2, 'g': 1, 'h': 1,
      'i': 1, 'j': 1, 'k': 1, 'l': 1,
      'm': 1, 'n': 1, 'o': 1, 'p': 1,
      'q': 1, 'r': 3, 's': 1, 't': 1,
@@ -78,14 +78,14 @@
      '2': 1, '3': 1, '4': 1, '5': 1,
      '6': 1, '7': 1, '8': 1, '9': 1,
      'A': 1, 'B': 4, 'C': 1, 'D': 1,
-     'E': 1, 'F': 1, 'G': 1, 'H': 1,
+     'E': 1, 'F': 4, 'G': 1, 'H': 1,
      'I': 1, 'J': 1, 'K': 1, 'L': 1,
      'M': 1, 'N': 1, 'O': 1, 'P': 1,
      'Q': 1, 'R': 1, 'S': 1, 'T': 1,
      'U': 1, 'V': 1, 'W': 1, 'X': 1,
      'Y': 1, 'Z': 1, '_': 1, 'a': 1,
      'b': 4, 'c': 1, 'd': 1, 'e': 1,
-     'f': 1, 'g': 1, 'h': 1, 'i': 1,
+     'f': 4, 'g': 1, 'h': 1, 'i': 1,
      'j': 1, 'k': 1, 'l': 1, 'm': 1,
      'n': 1, 'o': 1, 'p': 1, 'q': 1,
      'r': 1, 's': 1, 't': 1, 'u': 1,
diff --git a/pypy/interpreter/pyparser/gendfa.py b/pypy/interpreter/pyparser/gendfa.py
--- a/pypy/interpreter/pyparser/gendfa.py
+++ b/pypy/interpreter/pyparser/gendfa.py
@@ -152,9 +152,9 @@
         return group(states,
                      chain(states,
                            maybe(states, groupStr(states, "rR")),
-                           maybe(states, groupStr(states, "bB"))),
+                           maybe(states, groupStr(states, "bBfF"))),
                      chain(states,
-                           maybe(states, groupStr(states, "bB")),
+                           maybe(states, groupStr(states, "bBfF")),
                            maybe(states, groupStr(states, "rR"))),
                      maybe(states, groupStr(states, "uU")))
     # ____________________________________________________________
diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py
--- a/pypy/interpreter/pyparser/parsestring.py
+++ b/pypy/interpreter/pyparser/parsestring.py
@@ -1,11 +1,22 @@
 # coding: utf-8
+from pypy.interpreter.baseobjspace import W_Root
 from pypy.interpreter.error import OperationError, oefmt
 from pypy.interpreter import unicodehelper
 from rpython.rlib.rstring import StringBuilder
 
 
+class W_FString(W_Root):
+    def __init__(self, unparsed, raw_mode):
+        assert isinstance(unparsed, unicode)
+        self.unparsed = unparsed     # but the quotes are removed
+        self.raw_mode = raw_mode
+        self.current_index = 0       # for astcompiler.fstring
+
+
 def parsestr(space, encoding, s):
-    """Parses a string or unicode literal, and return a wrapped value.
+    """Parses a string or unicode literal, and usually return
+    a wrapped value.  If we get an f-string, then instead return
+    an unparsed but unquoted W_FString instance.
 
     If encoding=None, the source string is ascii only.
     In other cases, the source string is in utf-8 encoding.
@@ -23,6 +34,7 @@
     rawmode = False
     unicode_literal = True
     saw_u = False
+    saw_f = False
 
     # string decoration handling
     if quote == 'b' or quote == 'B':
@@ -37,6 +49,10 @@
         ps += 1
         quote = s[ps]
         rawmode = True
+    elif quote == 'f' or quote == 'F':
+        ps += 1
+        quote = s[ps]
+        saw_f = True
 
     if not saw_u:
         if quote == 'r' or quote == 'R':
@@ -47,6 +63,10 @@
             ps += 1
             quote = s[ps]
             unicode_literal = False
+        elif quote == 'f' or quote == 'F':
+            ps += 1
+            quote = s[ps]
+            saw_f = True
 
     if quote != "'" and quote != '"':
         raise_app_valueerror(space,
@@ -70,6 +90,9 @@
             substr = s[ps:q]
         else:
             substr = decode_unicode_utf8(space, s, ps, q)
+        if saw_f:
+            v = unicodehelper.decode_utf8(space, substr)
+            return W_FString(v, rawmode)
         v = unicodehelper.decode_unicode_escape(space, substr)
         return space.wrap(v)
 
@@ -88,6 +111,8 @@
             return space.newbytes(substr)
         else:
             v = unicodehelper.decode_utf8(space, substr)
+            if saw_f:
+                return W_FString(v, rawmode)
             return space.wrap(v)
 
     v = PyString_DecodeEscape(space, substr, 'strict', encoding)
diff --git a/pypy/interpreter/pyparser/pytokenize.py b/pypy/interpreter/pyparser/pytokenize.py
--- a/pypy/interpreter/pyparser/pytokenize.py
+++ b/pypy/interpreter/pyparser/pytokenize.py
@@ -27,10 +27,12 @@
            'R' : None,
            "u" : None,
            "U" : None,
+           'f' : None,
+           'F' : None,
            'b' : None,
            'B' : None}
 
-for uniPrefix in ("", "b", "B"):
+for uniPrefix in ("", "b", "B", "f", "F"):
     for rawPrefix in ("", "r", "R"):
         prefix_1 = uniPrefix + rawPrefix
         prefix_2 = rawPrefix + uniPrefix
@@ -55,6 +57,11 @@
 for t in ("'''", '"""',
           "r'''", 'r"""', "R'''", 'R"""',
           "u'''", 'u"""', "U'''", 'U"""',
+          "f'''", 'f"""', "F'''", 'F"""',
+          "fr'''", 'fr"""', "Fr'''", 'Fr"""',
+          "fR'''", 'fR"""', "FR'''", 'FR"""',
+          "rf'''", 'rf"""', "rF'''", 'rF"""',
+          "Rf'''", 'Rf"""', "RF'''", 'RF"""',
           "b'''", 'b"""', "B'''", 'B"""',
           "br'''", 'br"""', "Br'''", 'Br"""',
           "bR'''", 'bR"""', "BR'''", 'BR"""',
@@ -65,6 +72,11 @@
 for t in ("'", '"',
           "r'", 'r"', "R'", 'R"',
           "u'", 'u"', "U'", 'U"',
+          "f'", 'f"', "F'", 'F"',
+          "fr'", 'fr"', "Fr'", 'Fr"',
+          "fR'", 'fR"', "FR'", 'FR"',
+          "rf'", 'rf"', "rF'", 'rF"',
+          "Rf'", 'Rf"', "RF'", 'RF"',
           "b'", 'b"', "B'", 'B"',
           "br'", 'br"', "Br'", 'Br"',
           "bR'", 'bR"', "BR'", 'BR"',
diff --git a/pypy/module/parser/pyparser.py b/pypy/module/parser/pyparser.py
--- a/pypy/module/parser/pyparser.py
+++ b/pypy/module/parser/pyparser.py
@@ -9,9 +9,10 @@
 
 
 class W_STType(W_Root):
-    def __init__(self, tree, mode):
+    def __init__(self, tree, mode, recursive_parser=None):
         self.tree = tree
         self.mode = mode
+        self.recursive_parser = recursive_parser
 
     @specialize.arg(3)
     def _build_app_tree(self, space, node, seq_maker, with_lineno, with_column):
@@ -52,7 +53,7 @@
     def descr_compile(self, space, filename="<syntax-tree>"):
         info = pyparse.CompileInfo(filename, self.mode)
         try:
-            ast = ast_from_node(space, self.tree, info)
+            ast = ast_from_node(space, self.tree, info, self.recursive_parser)
             result = compile_ast(space, ast, info)
         except error.IndentationError as e:
             raise OperationError(space.w_IndentationError,
@@ -82,7 +83,7 @@
     except error.SyntaxError as e:
         raise OperationError(space.w_SyntaxError,
                              e.wrap_info(space))
-    return space.wrap(W_STType(tree, mode))
+    return space.wrap(W_STType(tree, mode, recursive_parser=parser))
 
 
 @unwrap_spec(source=str)


More information about the pypy-commit mailing list