  1. /*
  2. * This file is part of the MicroPython project, http://micropython.org/
  3. *
  4. * The MIT License (MIT)
  5. *
  6. * Copyright (c) 2013-2019 Damien P. George
  7. * Copyright (c) 2014-2015 Paul Sokolovsky
  8. *
  9. * Permission is hereby granted, free of charge, to any person obtaining a copy
  10. * of this software and associated documentation files (the "Software"), to deal
  11. * in the Software without restriction, including without limitation the rights
  12. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  13. * copies of the Software, and to permit persons to whom the Software is
  14. * furnished to do so, subject to the following conditions:
  15. *
  16. * The above copyright notice and this permission notice shall be included in
  17. * all copies or substantial portions of the Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  22. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  23. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  24. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  25. * THE SOFTWARE.
  26. */
  27. #include <stdio.h>
  28. #include <string.h>
  29. #include <assert.h>
  30. #include "py/emitglue.h"
  31. #include "py/objtype.h"
  32. #include "py/runtime.h"
  33. #include "py/bc0.h"
  34. #include "py/bc.h"
  35. #include "py/profile.h"
// *FORMAT-OFF*

// Debug tracing of the VM: when enabled, print the current value-stack depth
// and disassemble the opcode about to execute.  Disabled by default (#if 0).
#if 0
#define TRACE(ip) printf("sp=%d ", (int)(sp - &code_state->state[0] + 1)); mp_bytecode_print2(&mp_plat_print, ip, 1, code_state->fun_bc->const_table);
#else
#define TRACE(ip)
#endif

// Value stack grows up (this makes it incompatible with native C stack, but
// makes sure that arguments to functions are in natural order arg1..argN
// (Python semantics mandates left-to-right evaluation order, including for
// function arguments). Stack pointer is pre-incremented and points at the
// top element.
// Exception stack also grows up, top element is also pointed at.

// Decode a variable-length unsigned integer from the bytecode stream into a
// new local `unum`: 7 payload bits per byte, high bit set on all but the
// last byte, most-significant group first.
#define DECODE_UINT \
    mp_uint_t unum = 0; \
    do { \
        unum = (unum << 7) + (*ip & 0x7f); \
    } while ((*ip++ & 0x80) != 0)

// Decode a 16-bit little-endian unsigned jump offset into `ulab`.
#define DECODE_ULABEL size_t ulab = (ip[0] | (ip[1] << 8)); ip += 2

// Decode a 16-bit little-endian signed jump offset into `slab`; the encoded
// value is biased by 0x8000 so the subtraction recovers the sign.
#define DECODE_SLABEL size_t slab = (ip[0] | (ip[1] << 8)) - 0x8000; ip += 2

#if MICROPY_PERSISTENT_CODE
// Persistent-code layout: qstrs are fixed 16-bit little-endian values, and
// pointers/objects are encoded as indices into the function's const_table.
#define DECODE_QSTR \
    qstr qst = ip[0] | ip[1] << 8; \
    ip += 2;
#define DECODE_PTR \
    DECODE_UINT; \
    void *ptr = (void*)(uintptr_t)code_state->fun_bc->const_table[unum]
#define DECODE_OBJ \
    DECODE_UINT; \
    mp_obj_t obj = (mp_obj_t)code_state->fun_bc->const_table[unum]
#else
// Non-persistent layout: qstrs use the same variable-length encoding as
// DECODE_UINT, and pointers/objects are stored inline in the bytecode
// stream at naturally-aligned addresses.
#define DECODE_QSTR qstr qst = 0; \
    do { \
        qst = (qst << 7) + (*ip & 0x7f); \
    } while ((*ip++ & 0x80) != 0)
#define DECODE_PTR \
    ip = (byte*)MP_ALIGN(ip, sizeof(void*)); \
    void *ptr = *(void**)ip; \
    ip += sizeof(void*)
#define DECODE_OBJ \
    ip = (byte*)MP_ALIGN(ip, sizeof(mp_obj_t)); \
    mp_obj_t obj = *(mp_obj_t*)ip; \
    ip += sizeof(mp_obj_t)
#endif
// Basic operations on the Python value stack; `sp` points at the top element
// (pre-increment on push, post-decrement on pop).
#define PUSH(val) *++sp = (val)
#define POP() (*sp--)
#define TOP() (*sp)
#define SET_TOP(val) *sp = (val)

#if MICROPY_PY_SYS_EXC_INFO
// Clear the exception recorded for sys.exc_info().
#define CLEAR_SYS_EXC_INFO() MP_STATE_VM(cur_exception) = NULL;
#else
#define CLEAR_SYS_EXC_INFO()
#endif

// Push a new entry on the exception stack: decode the (forward) handler
// label from the bytecode, record the handler address and the current value
// stack pointer (tagged with whether this is a with/finally block).
#define PUSH_EXC_BLOCK(with_or_finally) do { \
    DECODE_ULABEL; /* except labels are always forward */ \
    ++exc_sp; \
    exc_sp->handler = ip + ulab; \
    exc_sp->val_sp = MP_TAGPTR_MAKE(sp, ((with_or_finally) << 1)); \
    exc_sp->prev_exc = NULL; \
} while (0)

#define POP_EXC_BLOCK() \
    exc_sp--; /* pop back to previous exception handler */ \
    CLEAR_SYS_EXC_INFO() /* just clear sys.exc_info(), not compliant, but it shouldn't be used in 1st place */

// Cancel an already-active finally handler during an unwind jump: collapse
// the previous handler's state on the value stack so the current unwind
// destination takes its place.
#define CANCEL_ACTIVE_FINALLY(sp) do { \
    if (mp_obj_is_small_int(sp[-1])) { \
        /* Stack: (..., prev_dest_ip, prev_cause, dest_ip) */ \
        /* Cancel the unwind through the previous finally, replace with current one */ \
        sp[-2] = sp[0]; \
        sp -= 2; \
    } else { \
        assert(sp[-1] == mp_const_none || mp_obj_is_exception_instance(sp[-1])); \
        /* Stack: (..., None/exception, dest_ip) */ \
        /* Silence the finally's exception value (may be None or an exception) */ \
        sp[-1] = sp[0]; \
        --sp; \
    } \
} while (0)
#if MICROPY_PY_SYS_SETTRACE
// Hooks for sys.settrace() support: maintain the chain of executing frames
// in thread state and report events to the profiling/trace machinery.
// The asserts guard against a code_state being linked to itself.

// Make this code_state the thread's currently-executing frame.
#define FRAME_SETUP() do { \
    assert(code_state != code_state->prev_state); \
    MP_STATE_THREAD(current_code_state) = code_state; \
    assert(code_state != code_state->prev_state); \
} while(0)

// Link this frame to its caller, and report frame entry to the profiler
// (suppressed while the profiler itself is executing).
#define FRAME_ENTER() do { \
    assert(code_state != code_state->prev_state); \
    code_state->prev_state = MP_STATE_THREAD(current_code_state); \
    assert(code_state != code_state->prev_state); \
    if (!mp_prof_is_executing) { \
        mp_prof_frame_enter(code_state); \
    } \
} while(0)

// Restore the caller's frame as the thread's current frame on exit.
#define FRAME_LEAVE() do { \
    assert(code_state != code_state->prev_state); \
    MP_STATE_THREAD(current_code_state) = code_state->prev_state; \
    assert(code_state != code_state->prev_state); \
} while(0)

// Refresh this code_state's frame object (suppressed while profiling).
#define FRAME_UPDATE() do { \
    assert(MP_STATE_THREAD(current_code_state) == code_state); \
    if (!mp_prof_is_executing) { \
        code_state->frame = MP_OBJ_TO_PTR(mp_prof_frame_update(code_state)); \
    } \
} while(0)

// Per-opcode hook: debug-print the instruction when a global trace callback
// is installed, and tick the per-instruction trace for this frame's callback.
#define TRACE_TICK(current_ip, current_sp, is_exception) do { \
    assert(code_state != code_state->prev_state); \
    assert(MP_STATE_THREAD(current_code_state) == code_state); \
    if (!mp_prof_is_executing && code_state->frame && MP_STATE_THREAD(prof_trace_callback)) { \
        MP_PROF_INSTR_DEBUG_PRINT(code_state->ip); \
    } \
    if (!mp_prof_is_executing && code_state->frame && code_state->frame->callback) { \
        mp_prof_instr_tick(code_state, is_exception); \
    } \
} while(0)

#else // MICROPY_PY_SYS_SETTRACE
// Tracing disabled: all frame/trace hooks compile away to nothing.
#define FRAME_SETUP()
#define FRAME_ENTER()
#define FRAME_LEAVE()
#define FRAME_UPDATE()
#define TRACE_TICK(current_ip, current_sp, is_exception)
#endif // MICROPY_PY_SYS_SETTRACE
  154. #if MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
  155. static inline mp_map_elem_t *mp_map_cached_lookup(mp_map_t *map, qstr qst, uint8_t *idx_cache) {
  156. size_t idx = *idx_cache;
  157. mp_obj_t key = MP_OBJ_NEW_QSTR(qst);
  158. mp_map_elem_t *elem = NULL;
  159. if (idx < map->alloc && map->table[idx].key == key) {
  160. elem = &map->table[idx];
  161. } else {
  162. elem = mp_map_lookup(map, key, MP_MAP_LOOKUP);
  163. if (elem != NULL) {
  164. *idx_cache = (elem - &map->table[0]) & 0xff;
  165. }
  166. }
  167. return elem;
  168. }
  169. #endif
  170. // fastn has items in reverse order (fastn[0] is local[0], fastn[-1] is local[1], etc)
  171. // sp points to bottom of stack which grows up
  172. // returns:
  173. // MP_VM_RETURN_NORMAL, sp valid, return value in *sp
  174. // MP_VM_RETURN_YIELD, ip, sp valid, yielded value in *sp
  175. // MP_VM_RETURN_EXCEPTION, exception in state[0]
  176. mp_vm_return_kind_t mp_execute_bytecode(mp_code_state_t *code_state, volatile mp_obj_t inject_exc) {
  177. #define SELECTIVE_EXC_IP (0)
  178. #if SELECTIVE_EXC_IP
  179. #define MARK_EXC_IP_SELECTIVE() { code_state->ip = ip; } /* stores ip 1 byte past last opcode */
  180. #define MARK_EXC_IP_GLOBAL()
  181. #else
  182. #define MARK_EXC_IP_SELECTIVE()
  183. #define MARK_EXC_IP_GLOBAL() { code_state->ip = ip; } /* stores ip pointing to last opcode */
  184. #endif
  185. #if MICROPY_OPT_COMPUTED_GOTO
  186. #include "py/vmentrytable.h"
  187. #define DISPATCH() do { \
  188. TRACE(ip); \
  189. MARK_EXC_IP_GLOBAL(); \
  190. TRACE_TICK(ip, sp, false); \
  191. goto *entry_table[*ip++]; \
  192. } while (0)
  193. #define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check
  194. #define ENTRY(op) entry_##op
  195. #define ENTRY_DEFAULT entry_default
  196. #else
  197. #define DISPATCH() goto dispatch_loop
  198. #define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check
  199. #define ENTRY(op) case op
  200. #define ENTRY_DEFAULT default
  201. #endif
  202. // nlr_raise needs to be implemented as a goto, so that the C compiler's flow analyser
  203. // sees that it's possible for us to jump from the dispatch loop to the exception
  204. // handler. Without this, the code may have a different stack layout in the dispatch
  205. // loop and the exception handler, leading to very obscure bugs.
  206. #define RAISE(o) do { nlr_pop(); nlr.ret_val = MP_OBJ_TO_PTR(o); goto exception_handler; } while (0)
  207. #if MICROPY_STACKLESS
  208. run_code_state: ;
  209. #endif
  210. FRAME_ENTER();
  211. #if MICROPY_STACKLESS
  212. run_code_state_from_return: ;
  213. #endif
  214. FRAME_SETUP();
  215. // Pointers which are constant for particular invocation of mp_execute_bytecode()
  216. mp_obj_t * /*const*/ fastn;
  217. mp_exc_stack_t * /*const*/ exc_stack;
  218. {
  219. size_t n_state = code_state->n_state;
  220. fastn = &code_state->state[n_state - 1];
  221. exc_stack = (mp_exc_stack_t*)(code_state->state + n_state);
  222. }
  223. // variables that are visible to the exception handler (declared volatile)
  224. mp_exc_stack_t *volatile exc_sp = MP_CODE_STATE_EXC_SP_IDX_TO_PTR(exc_stack, code_state->exc_sp_idx); // stack grows up, exc_sp points to top of stack
  225. #if MICROPY_PY_THREAD_GIL && MICROPY_PY_THREAD_GIL_VM_DIVISOR
  226. // This needs to be volatile and outside the VM loop so it persists across handling
  227. // of any exceptions. Otherwise it's possible that the VM never gives up the GIL.
  228. volatile int gil_divisor = MICROPY_PY_THREAD_GIL_VM_DIVISOR;
  229. #endif
  230. // outer exception handling loop
  231. for (;;) {
  232. nlr_buf_t nlr;
  233. outer_dispatch_loop:
  234. if (nlr_push(&nlr) == 0) {
  235. // local variables that are not visible to the exception handler
  236. const byte *ip = code_state->ip;
  237. mp_obj_t *sp = code_state->sp;
  238. mp_obj_t obj_shared;
  239. MICROPY_VM_HOOK_INIT
  240. // If we have exception to inject, now that we finish setting up
  241. // execution context, raise it. This works as if MP_BC_RAISE_OBJ
  242. // bytecode was executed.
  243. // Injecting exc into yield from generator is a special case,
  244. // handled by MP_BC_YIELD_FROM itself
  245. if (inject_exc != MP_OBJ_NULL && *ip != MP_BC_YIELD_FROM) {
  246. mp_obj_t exc = inject_exc;
  247. inject_exc = MP_OBJ_NULL;
  248. exc = mp_make_raise_obj(exc);
  249. RAISE(exc);
  250. }
  251. // loop to execute byte code
  252. for (;;) {
  253. dispatch_loop:
  254. #if MICROPY_OPT_COMPUTED_GOTO
  255. DISPATCH();
  256. #else
  257. TRACE(ip);
  258. MARK_EXC_IP_GLOBAL();
  259. TRACE_TICK(ip, sp, false);
  260. switch (*ip++) {
  261. #endif
  262. ENTRY(MP_BC_LOAD_CONST_FALSE):
  263. PUSH(mp_const_false);
  264. DISPATCH();
  265. ENTRY(MP_BC_LOAD_CONST_NONE):
  266. PUSH(mp_const_none);
  267. DISPATCH();
  268. ENTRY(MP_BC_LOAD_CONST_TRUE):
  269. PUSH(mp_const_true);
  270. DISPATCH();
  271. ENTRY(MP_BC_LOAD_CONST_SMALL_INT): {
  272. mp_int_t num = 0;
  273. if ((ip[0] & 0x40) != 0) {
  274. // Number is negative
  275. num--;
  276. }
  277. do {
  278. num = (num << 7) | (*ip & 0x7f);
  279. } while ((*ip++ & 0x80) != 0);
  280. PUSH(MP_OBJ_NEW_SMALL_INT(num));
  281. DISPATCH();
  282. }
  283. ENTRY(MP_BC_LOAD_CONST_STRING): {
  284. DECODE_QSTR;
  285. PUSH(MP_OBJ_NEW_QSTR(qst));
  286. DISPATCH();
  287. }
  288. ENTRY(MP_BC_LOAD_CONST_OBJ): {
  289. DECODE_OBJ;
  290. PUSH(obj);
  291. DISPATCH();
  292. }
  293. ENTRY(MP_BC_LOAD_NULL):
  294. PUSH(MP_OBJ_NULL);
  295. DISPATCH();
  296. ENTRY(MP_BC_LOAD_FAST_N): {
  297. DECODE_UINT;
  298. obj_shared = fastn[-unum];
  299. load_check:
  300. if (obj_shared == MP_OBJ_NULL) {
  301. local_name_error: {
  302. MARK_EXC_IP_SELECTIVE();
  303. mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NameError, MP_ERROR_TEXT("local variable referenced before assignment"));
  304. RAISE(obj);
  305. }
  306. }
  307. PUSH(obj_shared);
  308. DISPATCH();
  309. }
  310. ENTRY(MP_BC_LOAD_DEREF): {
  311. DECODE_UINT;
  312. obj_shared = mp_obj_cell_get(fastn[-unum]);
  313. goto load_check;
  314. }
  315. #if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
  316. ENTRY(MP_BC_LOAD_NAME): {
  317. MARK_EXC_IP_SELECTIVE();
  318. DECODE_QSTR;
  319. PUSH(mp_load_name(qst));
  320. DISPATCH();
  321. }
  322. #else
  323. ENTRY(MP_BC_LOAD_NAME): {
  324. MARK_EXC_IP_SELECTIVE();
  325. DECODE_QSTR;
  326. mp_map_elem_t *elem = mp_map_cached_lookup(&mp_locals_get()->map, qst, (uint8_t*)ip);
  327. mp_obj_t obj;
  328. if (elem != NULL) {
  329. obj = elem->value;
  330. } else {
  331. obj = mp_load_name(qst);
  332. }
  333. PUSH(obj);
  334. ip++;
  335. DISPATCH();
  336. }
  337. #endif
  338. #if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
  339. ENTRY(MP_BC_LOAD_GLOBAL): {
  340. MARK_EXC_IP_SELECTIVE();
  341. DECODE_QSTR;
  342. PUSH(mp_load_global(qst));
  343. DISPATCH();
  344. }
  345. #else
  346. ENTRY(MP_BC_LOAD_GLOBAL): {
  347. MARK_EXC_IP_SELECTIVE();
  348. DECODE_QSTR;
  349. mp_map_elem_t *elem = mp_map_cached_lookup(&mp_globals_get()->map, qst, (uint8_t*)ip);
  350. mp_obj_t obj;
  351. if (elem != NULL) {
  352. obj = elem->value;
  353. } else {
  354. obj = mp_load_global(qst);
  355. }
  356. PUSH(obj);
  357. ip++;
  358. DISPATCH();
  359. }
  360. #endif
  361. #if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
  362. ENTRY(MP_BC_LOAD_ATTR): {
  363. FRAME_UPDATE();
  364. MARK_EXC_IP_SELECTIVE();
  365. DECODE_QSTR;
  366. SET_TOP(mp_load_attr(TOP(), qst));
  367. DISPATCH();
  368. }
  369. #else
  370. ENTRY(MP_BC_LOAD_ATTR): {
  371. FRAME_UPDATE();
  372. MARK_EXC_IP_SELECTIVE();
  373. DECODE_QSTR;
  374. mp_obj_t top = TOP();
  375. mp_map_elem_t *elem = NULL;
  376. if (mp_obj_is_instance_type(mp_obj_get_type(top))) {
  377. mp_obj_instance_t *self = MP_OBJ_TO_PTR(top);
  378. elem = mp_map_cached_lookup(&self->members, qst, (uint8_t*)ip);
  379. }
  380. mp_obj_t obj;
  381. if (elem != NULL) {
  382. obj = elem->value;
  383. } else {
  384. obj = mp_load_attr(top, qst);
  385. }
  386. SET_TOP(obj);
  387. ip++;
  388. DISPATCH();
  389. }
  390. #endif
  391. ENTRY(MP_BC_LOAD_METHOD): {
  392. MARK_EXC_IP_SELECTIVE();
  393. DECODE_QSTR;
  394. mp_load_method(*sp, qst, sp);
  395. sp += 1;
  396. DISPATCH();
  397. }
  398. ENTRY(MP_BC_LOAD_SUPER_METHOD): {
  399. MARK_EXC_IP_SELECTIVE();
  400. DECODE_QSTR;
  401. sp -= 1;
  402. mp_load_super_method(qst, sp - 1);
  403. DISPATCH();
  404. }
  405. ENTRY(MP_BC_LOAD_BUILD_CLASS):
  406. MARK_EXC_IP_SELECTIVE();
  407. PUSH(mp_load_build_class());
  408. DISPATCH();
  409. ENTRY(MP_BC_LOAD_SUBSCR): {
  410. MARK_EXC_IP_SELECTIVE();
  411. mp_obj_t index = POP();
  412. SET_TOP(mp_obj_subscr(TOP(), index, MP_OBJ_SENTINEL));
  413. DISPATCH();
  414. }
  415. ENTRY(MP_BC_STORE_FAST_N): {
  416. DECODE_UINT;
  417. fastn[-unum] = POP();
  418. DISPATCH();
  419. }
  420. ENTRY(MP_BC_STORE_DEREF): {
  421. DECODE_UINT;
  422. mp_obj_cell_set(fastn[-unum], POP());
  423. DISPATCH();
  424. }
  425. ENTRY(MP_BC_STORE_NAME): {
  426. MARK_EXC_IP_SELECTIVE();
  427. DECODE_QSTR;
  428. mp_store_name(qst, POP());
  429. DISPATCH();
  430. }
  431. ENTRY(MP_BC_STORE_GLOBAL): {
  432. MARK_EXC_IP_SELECTIVE();
  433. DECODE_QSTR;
  434. mp_store_global(qst, POP());
  435. DISPATCH();
  436. }
  437. #if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
  438. ENTRY(MP_BC_STORE_ATTR): {
  439. FRAME_UPDATE();
  440. MARK_EXC_IP_SELECTIVE();
  441. DECODE_QSTR;
  442. mp_store_attr(sp[0], qst, sp[-1]);
  443. sp -= 2;
  444. DISPATCH();
  445. }
  446. #else
  447. // This caching code works with MICROPY_PY_BUILTINS_PROPERTY and/or
  448. // MICROPY_PY_DESCRIPTORS enabled because if the attr exists in
  449. // self->members then it can't be a property or have descriptors. A
  450. // consequence of this is that we can't use MP_MAP_LOOKUP_ADD_IF_NOT_FOUND
  451. // in the fast-path below, because that store could override a property.
  452. ENTRY(MP_BC_STORE_ATTR): {
  453. FRAME_UPDATE();
  454. MARK_EXC_IP_SELECTIVE();
  455. DECODE_QSTR;
  456. mp_map_elem_t *elem = NULL;
  457. mp_obj_t top = TOP();
  458. if (mp_obj_is_instance_type(mp_obj_get_type(top)) && sp[-1] != MP_OBJ_NULL) {
  459. mp_obj_instance_t *self = MP_OBJ_TO_PTR(top);
  460. elem = mp_map_cached_lookup(&self->members, qst, (uint8_t*)ip);
  461. }
  462. if (elem != NULL) {
  463. elem->value = sp[-1];
  464. } else {
  465. mp_store_attr(sp[0], qst, sp[-1]);
  466. }
  467. sp -= 2;
  468. ip++;
  469. DISPATCH();
  470. }
  471. #endif
  472. ENTRY(MP_BC_STORE_SUBSCR):
  473. MARK_EXC_IP_SELECTIVE();
  474. mp_obj_subscr(sp[-1], sp[0], sp[-2]);
  475. sp -= 3;
  476. DISPATCH();
  477. ENTRY(MP_BC_DELETE_FAST): {
  478. MARK_EXC_IP_SELECTIVE();
  479. DECODE_UINT;
  480. if (fastn[-unum] == MP_OBJ_NULL) {
  481. goto local_name_error;
  482. }
  483. fastn[-unum] = MP_OBJ_NULL;
  484. DISPATCH();
  485. }
  486. ENTRY(MP_BC_DELETE_DEREF): {
  487. MARK_EXC_IP_SELECTIVE();
  488. DECODE_UINT;
  489. if (mp_obj_cell_get(fastn[-unum]) == MP_OBJ_NULL) {
  490. goto local_name_error;
  491. }
  492. mp_obj_cell_set(fastn[-unum], MP_OBJ_NULL);
  493. DISPATCH();
  494. }
  495. ENTRY(MP_BC_DELETE_NAME): {
  496. MARK_EXC_IP_SELECTIVE();
  497. DECODE_QSTR;
  498. mp_delete_name(qst);
  499. DISPATCH();
  500. }
  501. ENTRY(MP_BC_DELETE_GLOBAL): {
  502. MARK_EXC_IP_SELECTIVE();
  503. DECODE_QSTR;
  504. mp_delete_global(qst);
  505. DISPATCH();
  506. }
  507. ENTRY(MP_BC_DUP_TOP): {
  508. mp_obj_t top = TOP();
  509. PUSH(top);
  510. DISPATCH();
  511. }
  512. ENTRY(MP_BC_DUP_TOP_TWO):
  513. sp += 2;
  514. sp[0] = sp[-2];
  515. sp[-1] = sp[-3];
  516. DISPATCH();
  517. ENTRY(MP_BC_POP_TOP):
  518. sp -= 1;
  519. DISPATCH();
  520. ENTRY(MP_BC_ROT_TWO): {
  521. mp_obj_t top = sp[0];
  522. sp[0] = sp[-1];
  523. sp[-1] = top;
  524. DISPATCH();
  525. }
  526. ENTRY(MP_BC_ROT_THREE): {
  527. mp_obj_t top = sp[0];
  528. sp[0] = sp[-1];
  529. sp[-1] = sp[-2];
  530. sp[-2] = top;
  531. DISPATCH();
  532. }
  533. ENTRY(MP_BC_JUMP): {
  534. DECODE_SLABEL;
  535. ip += slab;
  536. DISPATCH_WITH_PEND_EXC_CHECK();
  537. }
  538. ENTRY(MP_BC_POP_JUMP_IF_TRUE): {
  539. DECODE_SLABEL;
  540. if (mp_obj_is_true(POP())) {
  541. ip += slab;
  542. }
  543. DISPATCH_WITH_PEND_EXC_CHECK();
  544. }
  545. ENTRY(MP_BC_POP_JUMP_IF_FALSE): {
  546. DECODE_SLABEL;
  547. if (!mp_obj_is_true(POP())) {
  548. ip += slab;
  549. }
  550. DISPATCH_WITH_PEND_EXC_CHECK();
  551. }
  552. ENTRY(MP_BC_JUMP_IF_TRUE_OR_POP): {
  553. DECODE_SLABEL;
  554. if (mp_obj_is_true(TOP())) {
  555. ip += slab;
  556. } else {
  557. sp--;
  558. }
  559. DISPATCH_WITH_PEND_EXC_CHECK();
  560. }
  561. ENTRY(MP_BC_JUMP_IF_FALSE_OR_POP): {
  562. DECODE_SLABEL;
  563. if (mp_obj_is_true(TOP())) {
  564. sp--;
  565. } else {
  566. ip += slab;
  567. }
  568. DISPATCH_WITH_PEND_EXC_CHECK();
  569. }
  570. ENTRY(MP_BC_SETUP_WITH): {
  571. MARK_EXC_IP_SELECTIVE();
  572. // stack: (..., ctx_mgr)
  573. mp_obj_t obj = TOP();
  574. mp_load_method(obj, MP_QSTR___exit__, sp);
  575. mp_load_method(obj, MP_QSTR___enter__, sp + 2);
  576. mp_obj_t ret = mp_call_method_n_kw(0, 0, sp + 2);
  577. sp += 1;
  578. PUSH_EXC_BLOCK(1);
  579. PUSH(ret);
  580. // stack: (..., __exit__, ctx_mgr, as_value)
  581. DISPATCH();
  582. }
  583. ENTRY(MP_BC_WITH_CLEANUP): {
  584. MARK_EXC_IP_SELECTIVE();
  585. // Arriving here, there's "exception control block" on top of stack,
  586. // and __exit__ method (with self) underneath it. Bytecode calls __exit__,
  587. // and "deletes" it off stack, shifting "exception control block"
  588. // to its place.
  589. // The bytecode emitter ensures that there is enough space on the Python
  590. // value stack to hold the __exit__ method plus an additional 4 entries.
  591. if (TOP() == mp_const_none) {
  592. // stack: (..., __exit__, ctx_mgr, None)
  593. sp[1] = mp_const_none;
  594. sp[2] = mp_const_none;
  595. sp -= 2;
  596. mp_call_method_n_kw(3, 0, sp);
  597. SET_TOP(mp_const_none);
  598. } else if (mp_obj_is_small_int(TOP())) {
  599. // Getting here there are two distinct cases:
  600. // - unwind return, stack: (..., __exit__, ctx_mgr, ret_val, SMALL_INT(-1))
  601. // - unwind jump, stack: (..., __exit__, ctx_mgr, dest_ip, SMALL_INT(num_exc))
  602. // For both cases we do exactly the same thing.
  603. mp_obj_t data = sp[-1];
  604. mp_obj_t cause = sp[0];
  605. sp[-1] = mp_const_none;
  606. sp[0] = mp_const_none;
  607. sp[1] = mp_const_none;
  608. mp_call_method_n_kw(3, 0, sp - 3);
  609. sp[-3] = data;
  610. sp[-2] = cause;
  611. sp -= 2; // we removed (__exit__, ctx_mgr)
  612. } else {
  613. assert(mp_obj_is_exception_instance(TOP()));
  614. // stack: (..., __exit__, ctx_mgr, exc_instance)
  615. // Need to pass (exc_type, exc_instance, None) as arguments to __exit__.
  616. sp[1] = sp[0];
  617. sp[0] = MP_OBJ_FROM_PTR(mp_obj_get_type(sp[0]));
  618. sp[2] = mp_const_none;
  619. sp -= 2;
  620. mp_obj_t ret_value = mp_call_method_n_kw(3, 0, sp);
  621. if (mp_obj_is_true(ret_value)) {
  622. // We need to silence/swallow the exception. This is done
  623. // by popping the exception and the __exit__ handler and
  624. // replacing it with None, which signals END_FINALLY to just
  625. // execute the finally handler normally.
  626. SET_TOP(mp_const_none);
  627. } else {
  628. // We need to re-raise the exception. We pop __exit__ handler
  629. // by copying the exception instance down to the new top-of-stack.
  630. sp[0] = sp[3];
  631. }
  632. }
  633. DISPATCH();
  634. }
  635. ENTRY(MP_BC_UNWIND_JUMP): {
  636. MARK_EXC_IP_SELECTIVE();
  637. DECODE_SLABEL;
  638. PUSH((mp_obj_t)(mp_uint_t)(uintptr_t)(ip + slab)); // push destination ip for jump
  639. PUSH((mp_obj_t)(mp_uint_t)(*ip)); // push number of exception handlers to unwind (0x80 bit set if we also need to pop stack)
  640. unwind_jump:;
  641. mp_uint_t unum = (mp_uint_t)POP(); // get number of exception handlers to unwind
  642. while ((unum & 0x7f) > 0) {
  643. unum -= 1;
  644. assert(exc_sp >= exc_stack);
  645. if (MP_TAGPTR_TAG1(exc_sp->val_sp)) {
  646. if (exc_sp->handler > ip) {
  647. // Found a finally handler that isn't active; run it.
  648. // Getting here the stack looks like:
  649. // (..., X, dest_ip)
  650. // where X is pointed to by exc_sp->val_sp and in the case
  651. // of a "with" block contains the context manager info.
  652. assert(&sp[-1] == MP_TAGPTR_PTR(exc_sp->val_sp));
  653. // We're going to run "finally" code as a coroutine
  654. // (not calling it recursively). Set up a sentinel
  655. // on the stack so it can return back to us when it is
  656. // done (when WITH_CLEANUP or END_FINALLY reached).
  657. // The sentinel is the number of exception handlers left to
  658. // unwind, which is a non-negative integer.
  659. PUSH(MP_OBJ_NEW_SMALL_INT(unum));
  660. ip = exc_sp->handler;
  661. goto dispatch_loop;
  662. } else {
  663. // Found a finally handler that is already active; cancel it.
  664. CANCEL_ACTIVE_FINALLY(sp);
  665. }
  666. }
  667. POP_EXC_BLOCK();
  668. }
  669. ip = (const byte*)MP_OBJ_TO_PTR(POP()); // pop destination ip for jump
  670. if (unum != 0) {
  671. // pop the exhausted iterator
  672. sp -= MP_OBJ_ITER_BUF_NSLOTS;
  673. }
  674. DISPATCH_WITH_PEND_EXC_CHECK();
  675. }
  676. ENTRY(MP_BC_SETUP_EXCEPT):
  677. ENTRY(MP_BC_SETUP_FINALLY): {
  678. MARK_EXC_IP_SELECTIVE();
  679. #if SELECTIVE_EXC_IP
  680. PUSH_EXC_BLOCK((code_state->ip[-1] == MP_BC_SETUP_FINALLY) ? 1 : 0);
  681. #else
  682. PUSH_EXC_BLOCK((code_state->ip[0] == MP_BC_SETUP_FINALLY) ? 1 : 0);
  683. #endif
  684. DISPATCH();
  685. }
  686. ENTRY(MP_BC_END_FINALLY):
  687. MARK_EXC_IP_SELECTIVE();
  688. // if TOS is None, just pops it and continues
  689. // if TOS is an integer, finishes coroutine and returns control to caller
  690. // if TOS is an exception, reraises the exception
  691. assert(exc_sp >= exc_stack);
  692. POP_EXC_BLOCK();
  693. if (TOP() == mp_const_none) {
  694. sp--;
  695. } else if (mp_obj_is_small_int(TOP())) {
  696. // We finished "finally" coroutine and now dispatch back
  697. // to our caller, based on TOS value
  698. mp_int_t cause = MP_OBJ_SMALL_INT_VALUE(POP());
  699. if (cause < 0) {
  700. // A negative cause indicates unwind return
  701. goto unwind_return;
  702. } else {
  703. // Otherwise it's an unwind jump and we must push as a raw
  704. // number the number of exception handlers to unwind
  705. PUSH((mp_obj_t)cause);
  706. goto unwind_jump;
  707. }
  708. } else {
  709. assert(mp_obj_is_exception_instance(TOP()));
  710. RAISE(TOP());
  711. }
  712. DISPATCH();
  713. ENTRY(MP_BC_GET_ITER):
  714. MARK_EXC_IP_SELECTIVE();
  715. SET_TOP(mp_getiter(TOP(), NULL));
  716. DISPATCH();
  717. // An iterator for a for-loop takes MP_OBJ_ITER_BUF_NSLOTS slots on
  718. // the Python value stack. These slots are either used to store the
  719. // iterator object itself, or the first slot is MP_OBJ_NULL and
  720. // the second slot holds a reference to the iterator object.
  721. ENTRY(MP_BC_GET_ITER_STACK): {
  722. MARK_EXC_IP_SELECTIVE();
  723. mp_obj_t obj = TOP();
  724. mp_obj_iter_buf_t *iter_buf = (mp_obj_iter_buf_t*)sp;
  725. sp += MP_OBJ_ITER_BUF_NSLOTS - 1;
  726. obj = mp_getiter(obj, iter_buf);
  727. if (obj != MP_OBJ_FROM_PTR(iter_buf)) {
  728. // Iterator didn't use the stack so indicate that with MP_OBJ_NULL.
  729. sp[-MP_OBJ_ITER_BUF_NSLOTS + 1] = MP_OBJ_NULL;
  730. sp[-MP_OBJ_ITER_BUF_NSLOTS + 2] = obj;
  731. }
  732. DISPATCH();
  733. }
  734. ENTRY(MP_BC_FOR_ITER): {
  735. FRAME_UPDATE();
  736. MARK_EXC_IP_SELECTIVE();
  737. DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
  738. code_state->sp = sp;
  739. mp_obj_t obj;
  740. if (sp[-MP_OBJ_ITER_BUF_NSLOTS + 1] == MP_OBJ_NULL) {
  741. obj = sp[-MP_OBJ_ITER_BUF_NSLOTS + 2];
  742. } else {
  743. obj = MP_OBJ_FROM_PTR(&sp[-MP_OBJ_ITER_BUF_NSLOTS + 1]);
  744. }
  745. mp_obj_t value = mp_iternext_allow_raise(obj);
  746. if (value == MP_OBJ_STOP_ITERATION) {
  747. sp -= MP_OBJ_ITER_BUF_NSLOTS; // pop the exhausted iterator
  748. ip += ulab; // jump to after for-block
  749. } else {
  750. PUSH(value); // push the next iteration value
  751. #if MICROPY_PY_SYS_SETTRACE
  752. // LINE event should trigger for every iteration so invalidate last trigger
  753. if (code_state->frame) {
  754. code_state->frame->lineno = 0;
  755. }
  756. #endif
  757. }
  758. DISPATCH();
  759. }
  760. ENTRY(MP_BC_POP_EXCEPT_JUMP): {
  761. assert(exc_sp >= exc_stack);
  762. POP_EXC_BLOCK();
  763. DECODE_ULABEL;
  764. ip += ulab;
  765. DISPATCH_WITH_PEND_EXC_CHECK();
  766. }
  767. ENTRY(MP_BC_BUILD_TUPLE): {
  768. MARK_EXC_IP_SELECTIVE();
  769. DECODE_UINT;
  770. sp -= unum - 1;
  771. SET_TOP(mp_obj_new_tuple(unum, sp));
  772. DISPATCH();
  773. }
  774. ENTRY(MP_BC_BUILD_LIST): {
  775. MARK_EXC_IP_SELECTIVE();
  776. DECODE_UINT;
  777. sp -= unum - 1;
  778. SET_TOP(mp_obj_new_list(unum, sp));
  779. DISPATCH();
  780. }
  781. ENTRY(MP_BC_BUILD_MAP): {
  782. MARK_EXC_IP_SELECTIVE();
  783. DECODE_UINT;
  784. PUSH(mp_obj_new_dict(unum));
  785. DISPATCH();
  786. }
  787. ENTRY(MP_BC_STORE_MAP):
  788. MARK_EXC_IP_SELECTIVE();
  789. sp -= 2;
  790. mp_obj_dict_store(sp[0], sp[2], sp[1]);
  791. DISPATCH();
  792. #if MICROPY_PY_BUILTINS_SET
  793. ENTRY(MP_BC_BUILD_SET): {
  794. MARK_EXC_IP_SELECTIVE();
  795. DECODE_UINT;
  796. sp -= unum - 1;
  797. SET_TOP(mp_obj_new_set(unum, sp));
  798. DISPATCH();
  799. }
  800. #endif
  801. #if MICROPY_PY_BUILTINS_SLICE
  802. ENTRY(MP_BC_BUILD_SLICE): {
  803. MARK_EXC_IP_SELECTIVE();
  804. mp_obj_t step = mp_const_none;
  805. if (*ip++ == 3) {
  806. // 3-argument slice includes step
  807. step = POP();
  808. }
  809. mp_obj_t stop = POP();
  810. mp_obj_t start = TOP();
  811. SET_TOP(mp_obj_new_slice(start, stop, step));
  812. DISPATCH();
  813. }
  814. #endif
  815. ENTRY(MP_BC_STORE_COMP): {
  816. MARK_EXC_IP_SELECTIVE();
  817. DECODE_UINT;
  818. mp_obj_t obj = sp[-(unum >> 2)];
  819. if ((unum & 3) == 0) {
  820. mp_obj_list_append(obj, sp[0]);
  821. sp--;
  822. } else if (!MICROPY_PY_BUILTINS_SET || (unum & 3) == 1) {
  823. mp_obj_dict_store(obj, sp[0], sp[-1]);
  824. sp -= 2;
  825. #if MICROPY_PY_BUILTINS_SET
  826. } else {
  827. mp_obj_set_store(obj, sp[0]);
  828. sp--;
  829. #endif
  830. }
  831. DISPATCH();
  832. }
  833. ENTRY(MP_BC_UNPACK_SEQUENCE): {
  834. MARK_EXC_IP_SELECTIVE();
  835. DECODE_UINT;
  836. mp_unpack_sequence(sp[0], unum, sp);
  837. sp += unum - 1;
  838. DISPATCH();
  839. }
  840. ENTRY(MP_BC_UNPACK_EX): {
  841. MARK_EXC_IP_SELECTIVE();
  842. DECODE_UINT;
  843. mp_unpack_ex(sp[0], unum, sp);
  844. sp += (unum & 0xff) + ((unum >> 8) & 0xff);
  845. DISPATCH();
  846. }
  847. ENTRY(MP_BC_MAKE_FUNCTION): {
  848. DECODE_PTR;
  849. PUSH(mp_make_function_from_raw_code(ptr, MP_OBJ_NULL, MP_OBJ_NULL));
  850. DISPATCH();
  851. }
  852. ENTRY(MP_BC_MAKE_FUNCTION_DEFARGS): {
  853. DECODE_PTR;
  854. // Stack layout: def_tuple def_dict <- TOS
  855. mp_obj_t def_dict = POP();
  856. SET_TOP(mp_make_function_from_raw_code(ptr, TOP(), def_dict));
  857. DISPATCH();
  858. }
  859. ENTRY(MP_BC_MAKE_CLOSURE): {
  860. DECODE_PTR;
  861. size_t n_closed_over = *ip++;
  862. // Stack layout: closed_overs <- TOS
  863. sp -= n_closed_over - 1;
  864. SET_TOP(mp_make_closure_from_raw_code(ptr, n_closed_over, sp));
  865. DISPATCH();
  866. }
  867. ENTRY(MP_BC_MAKE_CLOSURE_DEFARGS): {
  868. DECODE_PTR;
  869. size_t n_closed_over = *ip++;
  870. // Stack layout: def_tuple def_dict closed_overs <- TOS
  871. sp -= 2 + n_closed_over - 1;
  872. SET_TOP(mp_make_closure_from_raw_code(ptr, 0x100 | n_closed_over, sp));
  873. DISPATCH();
  874. }
  875. ENTRY(MP_BC_CALL_FUNCTION): {
  876. FRAME_UPDATE();
  877. MARK_EXC_IP_SELECTIVE();
  878. DECODE_UINT;
  879. // unum & 0xff == n_positional
  880. // (unum >> 8) & 0xff == n_keyword
  881. sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe);
  882. #if MICROPY_STACKLESS
  883. if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
  884. code_state->ip = ip;
  885. code_state->sp = sp;
  886. code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);
  887. mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(*sp, unum & 0xff, (unum >> 8) & 0xff, sp + 1);
  888. #if !MICROPY_ENABLE_PYSTACK
  889. if (new_state == NULL) {
  890. // Couldn't allocate codestate on heap: in the strict case raise
  891. // an exception, otherwise just fall through to stack allocation.
  892. #if MICROPY_STACKLESS_STRICT
  893. deep_recursion_error:
  894. mp_raise_recursion_depth();
  895. #endif
  896. } else
  897. #endif
  898. {
  899. new_state->prev = code_state;
  900. code_state = new_state;
  901. nlr_pop();
  902. goto run_code_state;
  903. }
  904. }
  905. #endif
  906. SET_TOP(mp_call_function_n_kw(*sp, unum & 0xff, (unum >> 8) & 0xff, sp + 1));
  907. DISPATCH();
  908. }
  909. ENTRY(MP_BC_CALL_FUNCTION_VAR_KW): {
  910. FRAME_UPDATE();
  911. MARK_EXC_IP_SELECTIVE();
  912. DECODE_UINT;
  913. // unum & 0xff == n_positional
  914. // (unum >> 8) & 0xff == n_keyword
  915. // We have following stack layout here:
  916. // fun arg0 arg1 ... kw0 val0 kw1 val1 ... seq dict <- TOS
  917. sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 2;
  918. #if MICROPY_STACKLESS
  919. if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
  920. code_state->ip = ip;
  921. code_state->sp = sp;
  922. code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);
  923. mp_call_args_t out_args;
  924. mp_call_prepare_args_n_kw_var(false, unum, sp, &out_args);
  925. mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(out_args.fun,
  926. out_args.n_args, out_args.n_kw, out_args.args);
  927. #if !MICROPY_ENABLE_PYSTACK
  928. // Freeing args at this point does not follow a LIFO order so only do it if
  929. // pystack is not enabled. For pystack, they are freed when code_state is.
  930. mp_nonlocal_free(out_args.args, out_args.n_alloc * sizeof(mp_obj_t));
  931. #endif
  932. #if !MICROPY_ENABLE_PYSTACK
  933. if (new_state == NULL) {
  934. // Couldn't allocate codestate on heap: in the strict case raise
  935. // an exception, otherwise just fall through to stack allocation.
  936. #if MICROPY_STACKLESS_STRICT
  937. goto deep_recursion_error;
  938. #endif
  939. } else
  940. #endif
  941. {
  942. new_state->prev = code_state;
  943. code_state = new_state;
  944. nlr_pop();
  945. goto run_code_state;
  946. }
  947. }
  948. #endif
  949. SET_TOP(mp_call_method_n_kw_var(false, unum, sp));
  950. DISPATCH();
  951. }
  952. ENTRY(MP_BC_CALL_METHOD): {
  953. FRAME_UPDATE();
  954. MARK_EXC_IP_SELECTIVE();
  955. DECODE_UINT;
  956. // unum & 0xff == n_positional
  957. // (unum >> 8) & 0xff == n_keyword
  958. sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 1;
  959. #if MICROPY_STACKLESS
  960. if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
  961. code_state->ip = ip;
  962. code_state->sp = sp;
  963. code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);
  964. size_t n_args = unum & 0xff;
  965. size_t n_kw = (unum >> 8) & 0xff;
  966. int adjust = (sp[1] == MP_OBJ_NULL) ? 0 : 1;
  967. mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(*sp, n_args + adjust, n_kw, sp + 2 - adjust);
  968. #if !MICROPY_ENABLE_PYSTACK
  969. if (new_state == NULL) {
  970. // Couldn't allocate codestate on heap: in the strict case raise
  971. // an exception, otherwise just fall through to stack allocation.
  972. #if MICROPY_STACKLESS_STRICT
  973. goto deep_recursion_error;
  974. #endif
  975. } else
  976. #endif
  977. {
  978. new_state->prev = code_state;
  979. code_state = new_state;
  980. nlr_pop();
  981. goto run_code_state;
  982. }
  983. }
  984. #endif
  985. SET_TOP(mp_call_method_n_kw(unum & 0xff, (unum >> 8) & 0xff, sp));
  986. DISPATCH();
  987. }
  988. ENTRY(MP_BC_CALL_METHOD_VAR_KW): {
  989. FRAME_UPDATE();
  990. MARK_EXC_IP_SELECTIVE();
  991. DECODE_UINT;
  992. // unum & 0xff == n_positional
  993. // (unum >> 8) & 0xff == n_keyword
  994. // We have following stack layout here:
  995. // fun self arg0 arg1 ... kw0 val0 kw1 val1 ... seq dict <- TOS
  996. sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 3;
  997. #if MICROPY_STACKLESS
  998. if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
  999. code_state->ip = ip;
  1000. code_state->sp = sp;
  1001. code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);
  1002. mp_call_args_t out_args;
  1003. mp_call_prepare_args_n_kw_var(true, unum, sp, &out_args);
  1004. mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(out_args.fun,
  1005. out_args.n_args, out_args.n_kw, out_args.args);
  1006. #if !MICROPY_ENABLE_PYSTACK
  1007. // Freeing args at this point does not follow a LIFO order so only do it if
  1008. // pystack is not enabled. For pystack, they are freed when code_state is.
  1009. mp_nonlocal_free(out_args.args, out_args.n_alloc * sizeof(mp_obj_t));
  1010. #endif
  1011. #if !MICROPY_ENABLE_PYSTACK
  1012. if (new_state == NULL) {
  1013. // Couldn't allocate codestate on heap: in the strict case raise
  1014. // an exception, otherwise just fall through to stack allocation.
  1015. #if MICROPY_STACKLESS_STRICT
  1016. goto deep_recursion_error;
  1017. #endif
  1018. } else
  1019. #endif
  1020. {
  1021. new_state->prev = code_state;
  1022. code_state = new_state;
  1023. nlr_pop();
  1024. goto run_code_state;
  1025. }
  1026. }
  1027. #endif
  1028. SET_TOP(mp_call_method_n_kw_var(true, unum, sp));
  1029. DISPATCH();
  1030. }
  1031. ENTRY(MP_BC_RETURN_VALUE):
  1032. MARK_EXC_IP_SELECTIVE();
  1033. unwind_return:
  1034. // Search for and execute finally handlers that aren't already active
  1035. while (exc_sp >= exc_stack) {
  1036. if (MP_TAGPTR_TAG1(exc_sp->val_sp)) {
  1037. if (exc_sp->handler > ip) {
  1038. // Found a finally handler that isn't active; run it.
  1039. // Getting here the stack looks like:
  1040. // (..., X, [iter0, iter1, ...,] ret_val)
  1041. // where X is pointed to by exc_sp->val_sp and in the case
  1042. // of a "with" block contains the context manager info.
  1043. // There may be 0 or more for-iterators between X and the
  1044. // return value, and these must be removed before control can
  1045. // pass to the finally code. We simply copy the ret_value down
  1046. // over these iterators, if they exist. If they don't then the
  1047. // following is a null operation.
  1048. mp_obj_t *finally_sp = MP_TAGPTR_PTR(exc_sp->val_sp);
  1049. finally_sp[1] = sp[0];
  1050. sp = &finally_sp[1];
  1051. // We're going to run "finally" code as a coroutine
  1052. // (not calling it recursively). Set up a sentinel
  1053. // on a stack so it can return back to us when it is
  1054. // done (when WITH_CLEANUP or END_FINALLY reached).
  1055. PUSH(MP_OBJ_NEW_SMALL_INT(-1));
  1056. ip = exc_sp->handler;
  1057. goto dispatch_loop;
  1058. } else {
  1059. // Found a finally handler that is already active; cancel it.
  1060. CANCEL_ACTIVE_FINALLY(sp);
  1061. }
  1062. }
  1063. POP_EXC_BLOCK();
  1064. }
  1065. nlr_pop();
  1066. code_state->sp = sp;
  1067. assert(exc_sp == exc_stack - 1);
  1068. MICROPY_VM_HOOK_RETURN
  1069. #if MICROPY_STACKLESS
  1070. if (code_state->prev != NULL) {
  1071. mp_obj_t res = *sp;
  1072. mp_globals_set(code_state->old_globals);
  1073. mp_code_state_t *new_code_state = code_state->prev;
  1074. #if MICROPY_ENABLE_PYSTACK
  1075. // Free code_state, and args allocated by mp_call_prepare_args_n_kw_var
  1076. // (The latter is implicitly freed when using pystack due to its LIFO nature.)
  1077. // The sizeof in the following statement does not include the size of the variable
  1078. // part of the struct. This arg is anyway not used if pystack is enabled.
  1079. mp_nonlocal_free(code_state, sizeof(mp_code_state_t));
  1080. #endif
  1081. code_state = new_code_state;
  1082. *code_state->sp = res;
  1083. goto run_code_state_from_return;
  1084. }
  1085. #endif
  1086. FRAME_LEAVE();
  1087. return MP_VM_RETURN_NORMAL;
  1088. ENTRY(MP_BC_RAISE_LAST): {
  1089. MARK_EXC_IP_SELECTIVE();
  1090. // search for the inner-most previous exception, to reraise it
  1091. mp_obj_t obj = MP_OBJ_NULL;
  1092. for (mp_exc_stack_t *e = exc_sp; e >= exc_stack; --e) {
  1093. if (e->prev_exc != NULL) {
  1094. obj = MP_OBJ_FROM_PTR(e->prev_exc);
  1095. break;
  1096. }
  1097. }
  1098. if (obj == MP_OBJ_NULL) {
  1099. obj = mp_obj_new_exception_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("no active exception to reraise"));
  1100. }
  1101. RAISE(obj);
  1102. }
  1103. ENTRY(MP_BC_RAISE_OBJ): {
  1104. MARK_EXC_IP_SELECTIVE();
  1105. mp_obj_t obj = mp_make_raise_obj(TOP());
  1106. RAISE(obj);
  1107. }
  1108. ENTRY(MP_BC_RAISE_FROM): {
  1109. MARK_EXC_IP_SELECTIVE();
  1110. mp_warning(NULL, "exception chaining not supported");
  1111. sp--; // ignore (pop) "from" argument
  1112. mp_obj_t obj = mp_make_raise_obj(TOP());
  1113. RAISE(obj);
  1114. }
  1115. ENTRY(MP_BC_YIELD_VALUE):
  1116. yield:
  1117. nlr_pop();
  1118. code_state->ip = ip;
  1119. code_state->sp = sp;
  1120. code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);
  1121. FRAME_LEAVE();
  1122. return MP_VM_RETURN_YIELD;
  1123. ENTRY(MP_BC_YIELD_FROM): {
  1124. MARK_EXC_IP_SELECTIVE();
  1125. //#define EXC_MATCH(exc, type) mp_obj_is_type(exc, type)
  1126. #define EXC_MATCH(exc, type) mp_obj_exception_match(exc, type)
  1127. #define GENERATOR_EXIT_IF_NEEDED(t) if (t != MP_OBJ_NULL && EXC_MATCH(t, MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) { mp_obj_t raise_t = mp_make_raise_obj(t); RAISE(raise_t); }
  1128. mp_vm_return_kind_t ret_kind;
  1129. mp_obj_t send_value = POP();
  1130. mp_obj_t t_exc = MP_OBJ_NULL;
  1131. mp_obj_t ret_value;
  1132. code_state->sp = sp; // Save sp because it's needed if mp_resume raises StopIteration
  1133. if (inject_exc != MP_OBJ_NULL) {
  1134. t_exc = inject_exc;
  1135. inject_exc = MP_OBJ_NULL;
  1136. ret_kind = mp_resume(TOP(), MP_OBJ_NULL, t_exc, &ret_value);
  1137. } else {
  1138. ret_kind = mp_resume(TOP(), send_value, MP_OBJ_NULL, &ret_value);
  1139. }
  1140. if (ret_kind == MP_VM_RETURN_YIELD) {
  1141. ip--;
  1142. PUSH(ret_value);
  1143. goto yield;
  1144. } else if (ret_kind == MP_VM_RETURN_NORMAL) {
  1145. // Pop exhausted gen
  1146. sp--;
  1147. if (ret_value == MP_OBJ_STOP_ITERATION) {
  1148. // Optimize StopIteration
  1149. // TODO: get StopIteration's value
  1150. PUSH(mp_const_none);
  1151. } else {
  1152. PUSH(ret_value);
  1153. }
  1154. // If we injected GeneratorExit downstream, then even
  1155. // if it was swallowed, we re-raise GeneratorExit
  1156. GENERATOR_EXIT_IF_NEEDED(t_exc);
  1157. DISPATCH();
  1158. } else {
  1159. assert(ret_kind == MP_VM_RETURN_EXCEPTION);
  1160. assert(!EXC_MATCH(ret_value, MP_OBJ_FROM_PTR(&mp_type_StopIteration)));
  1161. // Pop exhausted gen
  1162. sp--;
  1163. RAISE(ret_value);
  1164. }
  1165. }
  1166. ENTRY(MP_BC_IMPORT_NAME): {
  1167. FRAME_UPDATE();
  1168. MARK_EXC_IP_SELECTIVE();
  1169. DECODE_QSTR;
  1170. mp_obj_t obj = POP();
  1171. SET_TOP(mp_import_name(qst, obj, TOP()));
  1172. DISPATCH();
  1173. }
  1174. ENTRY(MP_BC_IMPORT_FROM): {
  1175. FRAME_UPDATE();
  1176. MARK_EXC_IP_SELECTIVE();
  1177. DECODE_QSTR;
  1178. mp_obj_t obj = mp_import_from(TOP(), qst);
  1179. PUSH(obj);
  1180. DISPATCH();
  1181. }
  1182. ENTRY(MP_BC_IMPORT_STAR):
  1183. MARK_EXC_IP_SELECTIVE();
  1184. mp_import_all(POP());
  1185. DISPATCH();
  1186. #if MICROPY_OPT_COMPUTED_GOTO
  1187. ENTRY(MP_BC_LOAD_CONST_SMALL_INT_MULTI):
  1188. PUSH(MP_OBJ_NEW_SMALL_INT((mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS));
  1189. DISPATCH();
  1190. ENTRY(MP_BC_LOAD_FAST_MULTI):
  1191. obj_shared = fastn[MP_BC_LOAD_FAST_MULTI - (mp_int_t)ip[-1]];
  1192. goto load_check;
  1193. ENTRY(MP_BC_STORE_FAST_MULTI):
  1194. fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP();
  1195. DISPATCH();
  1196. ENTRY(MP_BC_UNARY_OP_MULTI):
  1197. MARK_EXC_IP_SELECTIVE();
  1198. SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP()));
  1199. DISPATCH();
  1200. ENTRY(MP_BC_BINARY_OP_MULTI): {
  1201. MARK_EXC_IP_SELECTIVE();
  1202. mp_obj_t rhs = POP();
  1203. mp_obj_t lhs = TOP();
  1204. SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs));
  1205. DISPATCH();
  1206. }
  1207. ENTRY_DEFAULT:
  1208. MARK_EXC_IP_SELECTIVE();
  1209. #else
  1210. ENTRY_DEFAULT:
  1211. if (ip[-1] < MP_BC_LOAD_CONST_SMALL_INT_MULTI + MP_BC_LOAD_CONST_SMALL_INT_MULTI_NUM) {
  1212. PUSH(MP_OBJ_NEW_SMALL_INT((mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS));
  1213. DISPATCH();
  1214. } else if (ip[-1] < MP_BC_LOAD_FAST_MULTI + MP_BC_LOAD_FAST_MULTI_NUM) {
  1215. obj_shared = fastn[MP_BC_LOAD_FAST_MULTI - (mp_int_t)ip[-1]];
  1216. goto load_check;
  1217. } else if (ip[-1] < MP_BC_STORE_FAST_MULTI + MP_BC_STORE_FAST_MULTI_NUM) {
  1218. fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP();
  1219. DISPATCH();
  1220. } else if (ip[-1] < MP_BC_UNARY_OP_MULTI + MP_BC_UNARY_OP_MULTI_NUM) {
  1221. SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP()));
  1222. DISPATCH();
  1223. } else if (ip[-1] < MP_BC_BINARY_OP_MULTI + MP_BC_BINARY_OP_MULTI_NUM) {
  1224. mp_obj_t rhs = POP();
  1225. mp_obj_t lhs = TOP();
  1226. SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs));
  1227. DISPATCH();
  1228. } else
  1229. #endif
  1230. {
  1231. mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NotImplementedError, MP_ERROR_TEXT("opcode"));
  1232. nlr_pop();
  1233. code_state->state[0] = obj;
  1234. FRAME_LEAVE();
  1235. return MP_VM_RETURN_EXCEPTION;
  1236. }
  1237. #if !MICROPY_OPT_COMPUTED_GOTO
  1238. } // switch
  1239. #endif
  1240. pending_exception_check:
  1241. MICROPY_VM_HOOK_LOOP
  1242. #if MICROPY_ENABLE_SCHEDULER
  1243. // This is an inlined variant of mp_handle_pending
  1244. if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
  1245. mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
  1246. // Re-check state is still pending now that we're in the atomic section.
  1247. if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
  1248. MARK_EXC_IP_SELECTIVE();
  1249. mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
  1250. if (obj != MP_OBJ_NULL) {
  1251. MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
  1252. if (!mp_sched_num_pending()) {
  1253. MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
  1254. }
  1255. MICROPY_END_ATOMIC_SECTION(atomic_state);
  1256. RAISE(obj);
  1257. }
  1258. mp_handle_pending_tail(atomic_state);
  1259. } else {
  1260. MICROPY_END_ATOMIC_SECTION(atomic_state);
  1261. }
  1262. }
  1263. #else
  1264. // This is an inlined variant of mp_handle_pending
  1265. if (MP_STATE_VM(mp_pending_exception) != MP_OBJ_NULL) {
  1266. MARK_EXC_IP_SELECTIVE();
  1267. mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
  1268. MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
  1269. RAISE(obj);
  1270. }
  1271. #endif
  1272. #if MICROPY_PY_THREAD_GIL
  1273. #if MICROPY_PY_THREAD_GIL_VM_DIVISOR
  1274. if (--gil_divisor == 0)
  1275. #endif
  1276. {
  1277. #if MICROPY_PY_THREAD_GIL_VM_DIVISOR
  1278. gil_divisor = MICROPY_PY_THREAD_GIL_VM_DIVISOR;
  1279. #endif
  1280. #if MICROPY_ENABLE_SCHEDULER
  1281. // can only switch threads if the scheduler is unlocked
  1282. if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE)
  1283. #endif
  1284. {
  1285. MP_THREAD_GIL_EXIT();
  1286. MP_THREAD_GIL_ENTER();
  1287. }
  1288. }
  1289. #endif
  1290. } // for loop
  1291. } else {
  1292. exception_handler:
  1293. // exception occurred
  1294. #if MICROPY_PY_SYS_EXC_INFO
  1295. MP_STATE_VM(cur_exception) = nlr.ret_val;
  1296. #endif
  1297. #if SELECTIVE_EXC_IP
  1298. // with selective ip, we store the ip 1 byte past the opcode, so move ptr back
  1299. code_state->ip -= 1;
  1300. #endif
  1301. if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(((mp_obj_base_t*)nlr.ret_val)->type), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
  1302. if (code_state->ip) {
  1303. // check if it's a StopIteration within a for block
  1304. if (*code_state->ip == MP_BC_FOR_ITER) {
  1305. const byte *ip = code_state->ip + 1;
  1306. DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
  1307. code_state->ip = ip + ulab; // jump to after for-block
  1308. code_state->sp -= MP_OBJ_ITER_BUF_NSLOTS; // pop the exhausted iterator
  1309. goto outer_dispatch_loop; // continue with dispatch loop
  1310. } else if (*code_state->ip == MP_BC_YIELD_FROM) {
  1311. // StopIteration inside yield from call means return a value of
  1312. // yield from, so inject exception's value as yield from's result
  1313. // (Instead of stack pop then push we just replace exhausted gen with value)
  1314. *code_state->sp = mp_obj_exception_get_value(MP_OBJ_FROM_PTR(nlr.ret_val));
  1315. code_state->ip++; // yield from is over, move to next instruction
  1316. goto outer_dispatch_loop; // continue with dispatch loop
  1317. }
  1318. }
  1319. }
  1320. #if MICROPY_PY_SYS_SETTRACE
  1321. // Exceptions are traced here
  1322. if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(((mp_obj_base_t*)nlr.ret_val)->type), MP_OBJ_FROM_PTR(&mp_type_Exception))) {
  1323. TRACE_TICK(code_state->ip, code_state->sp, true /* yes, it's an exception */);
  1324. }
  1325. #endif
  1326. #if MICROPY_STACKLESS
  1327. unwind_loop:
  1328. #endif
  1329. // Set traceback info (file and line number) where the exception occurred, but not for:
  1330. // - constant GeneratorExit object, because it's const
  1331. // - exceptions re-raised by END_FINALLY
  1332. // - exceptions re-raised explicitly by "raise"
  1333. if (nlr.ret_val != &mp_const_GeneratorExit_obj
  1334. && *code_state->ip != MP_BC_END_FINALLY
  1335. && *code_state->ip != MP_BC_RAISE_LAST) {
  1336. const byte *ip = code_state->fun_bc->bytecode;
  1337. MP_BC_PRELUDE_SIG_DECODE(ip);
  1338. MP_BC_PRELUDE_SIZE_DECODE(ip);
  1339. const byte *bytecode_start = ip + n_info + n_cell;
  1340. #if !MICROPY_PERSISTENT_CODE
  1341. // so bytecode is aligned
  1342. bytecode_start = MP_ALIGN(bytecode_start, sizeof(mp_uint_t));
  1343. #endif
  1344. size_t bc = code_state->ip - bytecode_start;
  1345. #if MICROPY_PERSISTENT_CODE
  1346. qstr block_name = ip[0] | (ip[1] << 8);
  1347. qstr source_file = ip[2] | (ip[3] << 8);
  1348. ip += 4;
  1349. #else
  1350. qstr block_name = mp_decode_uint_value(ip);
  1351. ip = mp_decode_uint_skip(ip);
  1352. qstr source_file = mp_decode_uint_value(ip);
  1353. ip = mp_decode_uint_skip(ip);
  1354. #endif
  1355. size_t source_line = mp_bytecode_get_source_line(ip, bc);
  1356. mp_obj_exception_add_traceback(MP_OBJ_FROM_PTR(nlr.ret_val), source_file, source_line, block_name);
  1357. }
  1358. while (exc_sp >= exc_stack && exc_sp->handler <= code_state->ip) {
  1359. // nested exception
  1360. assert(exc_sp >= exc_stack);
  1361. // TODO make a proper message for nested exception
  1362. // at the moment we are just raising the very last exception (the one that caused the nested exception)
  1363. // move up to previous exception handler
  1364. POP_EXC_BLOCK();
  1365. }
  1366. if (exc_sp >= exc_stack) {
  1367. // catch exception and pass to byte code
  1368. code_state->ip = exc_sp->handler;
  1369. mp_obj_t *sp = MP_TAGPTR_PTR(exc_sp->val_sp);
  1370. // save this exception in the stack so it can be used in a reraise, if needed
  1371. exc_sp->prev_exc = nlr.ret_val;
  1372. // push exception object so it can be handled by bytecode
  1373. PUSH(MP_OBJ_FROM_PTR(nlr.ret_val));
  1374. code_state->sp = sp;
  1375. #if MICROPY_STACKLESS
  1376. } else if (code_state->prev != NULL) {
  1377. mp_globals_set(code_state->old_globals);
  1378. mp_code_state_t *new_code_state = code_state->prev;
  1379. #if MICROPY_ENABLE_PYSTACK
  1380. // Free code_state, and args allocated by mp_call_prepare_args_n_kw_var
  1381. // (The latter is implicitly freed when using pystack due to its LIFO nature.)
  1382. // The sizeof in the following statement does not include the size of the variable
  1383. // part of the struct. This arg is anyway not used if pystack is enabled.
  1384. mp_nonlocal_free(code_state, sizeof(mp_code_state_t));
  1385. #endif
  1386. code_state = new_code_state;
  1387. size_t n_state = code_state->n_state;
  1388. fastn = &code_state->state[n_state - 1];
  1389. exc_stack = (mp_exc_stack_t*)(code_state->state + n_state);
  1390. // variables that are visible to the exception handler (declared volatile)
  1391. exc_sp = MP_CODE_STATE_EXC_SP_IDX_TO_PTR(exc_stack, code_state->exc_sp_idx); // stack grows up, exc_sp points to top of stack
  1392. goto unwind_loop;
  1393. #endif
  1394. } else {
  1395. // propagate exception to higher level
  1396. // Note: ip and sp don't have usable values at this point
  1397. code_state->state[0] = MP_OBJ_FROM_PTR(nlr.ret_val); // put exception here because sp is invalid
  1398. FRAME_LEAVE();
  1399. return MP_VM_RETURN_EXCEPTION;
  1400. }
  1401. }
  1402. }
  1403. }