[Python-checkins] Merge deoptimization blocks in interpreter (GH-32155)

markshannon webhook-mailer at python.org
Wed Mar 30 08:11:53 EDT 2022


https://github.com/python/cpython/commit/04acfa94bb383cce973739478a7b58ab20ab47f4
commit: 04acfa94bb383cce973739478a7b58ab20ab47f4
branch: main
author: Mark Shannon <mark at hotpy.org>
committer: markshannon <mark at hotpy.org>
date: 2022-03-30T13:11:33+01:00
summary:

Merge deoptimization blocks in interpreter (GH-32155)

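Before this change, DEOPT_IF(cond, instname) jumped to a per-instruction
"instname ## _miss" label, and ceval.c expanded MISS_WITH_INLINE_CACHE once
for each of the eleven specializable instruction families (LOAD_ATTR,
STORE_ATTR, LOAD_GLOBAL, ...).  The diff below replaces all of those
generated blocks with a single shared miss: block.  The shared handler maps
the specialized opcode back to its generic form through _PyOpcode_Deopt,
decrements the back-off counter stored in the first cache entry, rewrites
the instruction to its adaptive form (looked up in the now-exported
_PyOpcode_Adaptive table) only once that counter reaches zero, and then
re-dispatches on the generic opcode.

The following is a minimal, self-contained model of that back-off logic for
illustration only; the toy_* names, the simplified instruction layout and
the TOY_BACKOFF threshold stand in for CPython's real tables, inline caches
and ADAPTIVE_CACHE_BACKOFF.

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_BACKOFF 64            /* stand-in for ADAPTIVE_CACHE_BACKOFF */

    enum {
        TOY_LOAD_ATTR,                /* generic instruction */
        TOY_LOAD_ATTR_ADAPTIVE,       /* counting, ready to re-specialize */
        TOY_LOAD_ATTR_INSTANCE_VALUE, /* one specialized form */
    };

    /* Specialized opcode -> generic opcode (cf. _PyOpcode_Deopt). */
    static const uint8_t toy_deopt[] = {
        [TOY_LOAD_ATTR] = TOY_LOAD_ATTR,
        [TOY_LOAD_ATTR_ADAPTIVE] = TOY_LOAD_ATTR,
        [TOY_LOAD_ATTR_INSTANCE_VALUE] = TOY_LOAD_ATTR,
    };

    /* Generic opcode -> adaptive opcode (cf. _PyOpcode_Adaptive). */
    static const uint8_t toy_adaptive[] = {
        [TOY_LOAD_ATTR] = TOY_LOAD_ATTR_ADAPTIVE,
    };

    typedef struct {
        uint8_t opcode;
        uint16_t counter;   /* first cache entry: miss back-off counter */
    } toy_instr;

    /* The single shared miss handler: every specialized instruction whose
       DEOPT_IF guard fails ends up here. */
    static void
    toy_miss(toy_instr *instr)
    {
        uint8_t generic = toy_deopt[instr->opcode];
        if (--instr->counter == 0) {
            /* The back-off budget is used up: switch back to the adaptive
               opcode so the next execution can try to re-specialize. */
            instr->opcode = toy_adaptive[generic];
            instr->counter = TOY_BACKOFF;
        }
        /* The current execution then re-runs the generic opcode
           (in ceval.c: next_instr--; DISPATCH_GOTO();). */
    }

    int
    main(void)
    {
        /* A specialized instruction that keeps missing its guard. */
        toy_instr instr = { TOY_LOAD_ATTR_INSTANCE_VALUE, 3 };
        for (int i = 1; i <= 3; i++) {
            toy_miss(&instr);
            printf("miss %d: opcode=%d counter=%d\n",
                   i, instr.opcode, instr.counter);
        }
        return 0;
    }
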
files:
M Include/internal/pycore_code.h
M Python/ceval.c
M Python/specialize.c

diff --git a/Include/internal/pycore_code.h b/Include/internal/pycore_code.h
index 0f6613b6c1e3d..8c868bcd5b5cb 100644
--- a/Include/internal/pycore_code.h
+++ b/Include/internal/pycore_code.h
@@ -110,6 +110,8 @@ _PyCode_Warmup(PyCodeObject *code)
     }
 }
 
+extern uint8_t _PyOpcode_Adaptive[256];
+
 extern Py_ssize_t _Py_QuickenedCount;
 
 // Borrowed references to common callables:
diff --git a/Python/ceval.c b/Python/ceval.c
index 4b9d33404bb78..a7b377724bb54 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -1431,7 +1431,7 @@ eval_frame_handle_pending(PyThreadState *tstate)
 #define JUMP_TO_INSTRUCTION(op) goto PREDICT_ID(op)
 
 
-#define DEOPT_IF(cond, instname) if (cond) { goto instname ## _miss; }
+#define DEOPT_IF(cond, instname) if (cond) { goto miss; }
 
 
 #define GLOBALS() frame->f_globals
@@ -2551,18 +2551,18 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
             }
             Py_DECREF(v);
             if (gen_status == PYGEN_ERROR) {
-                assert (retval == NULL);
+                assert(retval == NULL);
                 goto error;
             }
             if (gen_status == PYGEN_RETURN) {
-                assert (retval != NULL);
+                assert(retval != NULL);
                 Py_DECREF(receiver);
                 SET_TOP(retval);
                 JUMPBY(oparg);
                 DISPATCH();
             }
-            assert (gen_status == PYGEN_NEXT);
-            assert (retval != NULL);
+            assert(gen_status == PYGEN_NEXT);
+            assert(retval != NULL);
             PUSH(retval);
             DISPATCH();
         }
@@ -4595,7 +4595,6 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
         }
 
         TARGET(CALL) {
-            PREDICTED(CALL);
             int is_meth;
         call_function:
             is_meth = is_method(stack_pointer, oparg);
@@ -5524,34 +5523,25 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
 
 /* Specialization misses */
 
-#define MISS_WITH_INLINE_CACHE(opname) \
-opname ## _miss: \
-    { \
-        STAT_INC(opcode, miss); \
-        STAT_INC(opname, miss); \
-        /* The counter is always the first cache entry: */ \
-        _Py_CODEUNIT *counter = (_Py_CODEUNIT *)next_instr; \
-        *counter -= 1; \
-        if (*counter == 0) { \
-            _Py_SET_OPCODE(next_instr[-1], opname ## _ADAPTIVE); \
-            STAT_INC(opname, deopt); \
-            *counter = ADAPTIVE_CACHE_BACKOFF; \
-        } \
-        JUMP_TO_INSTRUCTION(opname); \
+miss:
+    {
+        STAT_INC(opcode, miss);
+        opcode = _PyOpcode_Deopt[opcode];
+        STAT_INC(opcode, miss);
+        /* The counter is always the first cache entry: */
+        _Py_CODEUNIT *counter = (_Py_CODEUNIT *)next_instr;
+        *counter -= 1;
+        if (*counter == 0) {
+            int adaptive_opcode = _PyOpcode_Adaptive[opcode];
+            assert(adaptive_opcode);
+            _Py_SET_OPCODE(next_instr[-1], adaptive_opcode);
+            STAT_INC(opcode, deopt);
+            *counter = ADAPTIVE_CACHE_BACKOFF;
+        }
+        next_instr--;
+        DISPATCH_GOTO();
     }
 
-MISS_WITH_INLINE_CACHE(LOAD_ATTR)
-MISS_WITH_INLINE_CACHE(STORE_ATTR)
-MISS_WITH_INLINE_CACHE(LOAD_GLOBAL)
-MISS_WITH_INLINE_CACHE(LOAD_METHOD)
-MISS_WITH_INLINE_CACHE(PRECALL)
-MISS_WITH_INLINE_CACHE(CALL)
-MISS_WITH_INLINE_CACHE(BINARY_OP)
-MISS_WITH_INLINE_CACHE(COMPARE_OP)
-MISS_WITH_INLINE_CACHE(BINARY_SUBSCR)
-MISS_WITH_INLINE_CACHE(UNPACK_SEQUENCE)
-MISS_WITH_INLINE_CACHE(STORE_SUBSCR)
-
 binary_subscr_dict_error:
         {
             PyObject *sub = POP();
@@ -6717,7 +6707,7 @@ call_trace(Py_tracefunc func, PyObject *obj,
     int old_what = tstate->tracing_what;
     tstate->tracing_what = what;
     PyThreadState_EnterTracing(tstate);
-    assert (frame->f_lasti >= 0);
+    assert(frame->f_lasti >= 0);
     initialize_trace_info(&tstate->trace_info, frame);
     f->f_lineno = _PyCode_CheckLineNumber(frame->f_lasti*sizeof(_Py_CODEUNIT), &tstate->trace_info.bounds);
     result = func(obj, f, what, arg);
diff --git a/Python/specialize.c b/Python/specialize.c
index 5839d7629466d..244318a609e66 100644
--- a/Python/specialize.c
+++ b/Python/specialize.c
@@ -17,7 +17,7 @@
 
 /* Map from opcode to adaptive opcode.
   Values of zero are ignored. */
-static uint8_t adaptive_opcodes[256] = {
+uint8_t _PyOpcode_Adaptive[256] = {
     [LOAD_ATTR] = LOAD_ATTR_ADAPTIVE,
     [LOAD_GLOBAL] = LOAD_GLOBAL_ADAPTIVE,
     [LOAD_METHOD] = LOAD_METHOD_ADAPTIVE,
@@ -143,7 +143,7 @@ print_spec_stats(FILE *out, OpcodeStats *stats)
      * even though we don't specialize them yet. */
     fprintf(out, "opcode[%d].specializable : 1\n", FOR_ITER);
     for (int i = 0; i < 256; i++) {
-        if (adaptive_opcodes[i]) {
+        if (_PyOpcode_Adaptive[i]) {
             fprintf(out, "opcode[%d].specializable : 1\n", i);
         }
         PRINT_STAT(i, specialization.success);
@@ -259,7 +259,7 @@ _PyCode_Quicken(PyCodeObject *code)
     _Py_CODEUNIT *instructions = _PyCode_CODE(code);
     for (int i = 0; i < Py_SIZE(code); i++) {
         int opcode = _Py_OPCODE(instructions[i]);
-        uint8_t adaptive_opcode = adaptive_opcodes[opcode];
+        uint8_t adaptive_opcode = _PyOpcode_Adaptive[opcode];
         if (adaptive_opcode) {
             _Py_SET_OPCODE(instructions[i], adaptive_opcode);
             // Make sure the adaptive counter is zero:

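With every miss funnelled through that one block, adding a new specialized
instruction family no longer needs a matching MISS_WITH_INLINE_CACHE
expansion; the handler only consults the two opcode tables.  That is why
pycore_code.h now declares _PyOpcode_Adaptive and specialize.c renames its
previously static adaptive_opcodes array to that exported name.  The
PREDICTED(CALL) marker, which the old CALL_miss block jumped back through
via JUMP_TO_INSTRUCTION, is dropped as well.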

