gc.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975
  1. /*
  2. * This file is part of the MicroPython project, http://micropython.org/
  3. *
  4. * The MIT License (MIT)
  5. *
  6. * Copyright (c) 2013, 2014 Damien P. George
  7. * Copyright (c) 2014 Paul Sokolovsky
  8. *
  9. * Permission is hereby granted, free of charge, to any person obtaining a copy
  10. * of this software and associated documentation files (the "Software"), to deal
  11. * in the Software without restriction, including without limitation the rights
  12. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  13. * copies of the Software, and to permit persons to whom the Software is
  14. * furnished to do so, subject to the following conditions:
  15. *
  16. * The above copyright notice and this permission notice shall be included in
  17. * all copies or substantial portions of the Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  22. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  23. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  24. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  25. * THE SOFTWARE.
  26. */
  27. #include <assert.h>
  28. #include <stdio.h>
  29. #include <string.h>
  30. #include "py/gc.h"
  31. #include "py/runtime.h"
  32. #if MICROPY_ENABLE_GC
  33. #if MICROPY_DEBUG_VERBOSE // print debugging info
  34. #define DEBUG_PRINT (1)
  35. #define DEBUG_printf DEBUG_printf
  36. #else // don't print debugging info
  37. #define DEBUG_PRINT (0)
  38. #define DEBUG_printf(...) (void)0
  39. #endif
  40. // make this 1 to dump the heap each time it changes
  41. #define EXTENSIVE_HEAP_PROFILING (0)
  42. // make this 1 to zero out swept memory to more eagerly
  43. // detect untraced object still in use
  44. #define CLEAR_ON_SWEEP (0)
  45. #define WORDS_PER_BLOCK ((MICROPY_BYTES_PER_GC_BLOCK) / BYTES_PER_WORD)
  46. #define BYTES_PER_BLOCK (MICROPY_BYTES_PER_GC_BLOCK)
  47. // ATB = allocation table byte
  48. // 0b00 = FREE -- free block
  49. // 0b01 = HEAD -- head of a chain of blocks
  50. // 0b10 = TAIL -- in the tail of a chain of blocks
  51. // 0b11 = MARK -- marked head block
  52. #define AT_FREE (0)
  53. #define AT_HEAD (1)
  54. #define AT_TAIL (2)
  55. #define AT_MARK (3)
  56. #define BLOCKS_PER_ATB (4)
  57. #define ATB_MASK_0 (0x03)
  58. #define ATB_MASK_1 (0x0c)
  59. #define ATB_MASK_2 (0x30)
  60. #define ATB_MASK_3 (0xc0)
  61. #define ATB_0_IS_FREE(a) (((a) & ATB_MASK_0) == 0)
  62. #define ATB_1_IS_FREE(a) (((a) & ATB_MASK_1) == 0)
  63. #define ATB_2_IS_FREE(a) (((a) & ATB_MASK_2) == 0)
  64. #define ATB_3_IS_FREE(a) (((a) & ATB_MASK_3) == 0)
  65. #define BLOCK_SHIFT(block) (2 * ((block) & (BLOCKS_PER_ATB - 1)))
  66. #define ATB_GET_KIND(block) ((MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] >> BLOCK_SHIFT(block)) & 3)
  67. #define ATB_ANY_TO_FREE(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] &= (~(AT_MARK << BLOCK_SHIFT(block))); } while (0)
  68. #define ATB_FREE_TO_HEAD(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_HEAD << BLOCK_SHIFT(block)); } while (0)
  69. #define ATB_FREE_TO_TAIL(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_TAIL << BLOCK_SHIFT(block)); } while (0)
  70. #define ATB_HEAD_TO_MARK(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_MARK << BLOCK_SHIFT(block)); } while (0)
  71. #define ATB_MARK_TO_HEAD(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] &= (~(AT_TAIL << BLOCK_SHIFT(block))); } while (0)
  72. #define BLOCK_FROM_PTR(ptr) (((byte *)(ptr) - MP_STATE_MEM(gc_pool_start)) / BYTES_PER_BLOCK)
  73. #define PTR_FROM_BLOCK(block) (((block) * BYTES_PER_BLOCK + (uintptr_t)MP_STATE_MEM(gc_pool_start)))
  74. #define ATB_FROM_BLOCK(bl) ((bl) / BLOCKS_PER_ATB)
  75. #if MICROPY_ENABLE_FINALISER
  76. // FTB = finaliser table byte
  77. // if set, then the corresponding block may have a finaliser
  78. #define BLOCKS_PER_FTB (8)
  79. #define FTB_GET(block) ((MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] >> ((block) & 7)) & 1)
  80. #define FTB_SET(block) do { MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] |= (1 << ((block) & 7)); } while (0)
  81. #define FTB_CLEAR(block) do { MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] &= (~(1 << ((block) & 7))); } while (0)
  82. #endif
  83. #if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
  84. #define GC_ENTER() mp_thread_mutex_lock(&MP_STATE_MEM(gc_mutex), 1)
  85. #define GC_EXIT() mp_thread_mutex_unlock(&MP_STATE_MEM(gc_mutex))
  86. #else
  87. #define GC_ENTER()
  88. #define GC_EXIT()
  89. #endif
  90. // TODO waste less memory; currently requires that all entries in alloc_table have a corresponding block in pool
  91. void gc_init(void *start, void *end) {
  92. // align end pointer on block boundary
  93. end = (void *)((uintptr_t)end & (~(BYTES_PER_BLOCK - 1)));
  94. DEBUG_printf("Initializing GC heap: %p..%p = " UINT_FMT " bytes\n", start, end, (byte *)end - (byte *)start);
  95. // calculate parameters for GC (T=total, A=alloc table, F=finaliser table, P=pool; all in bytes):
  96. // T = A + F + P
  97. // F = A * BLOCKS_PER_ATB / BLOCKS_PER_FTB
  98. // P = A * BLOCKS_PER_ATB * BYTES_PER_BLOCK
  99. // => T = A * (1 + BLOCKS_PER_ATB / BLOCKS_PER_FTB + BLOCKS_PER_ATB * BYTES_PER_BLOCK)
  100. size_t total_byte_len = (byte *)end - (byte *)start;
  101. #if MICROPY_ENABLE_FINALISER
  102. MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len * BITS_PER_BYTE / (BITS_PER_BYTE + BITS_PER_BYTE * BLOCKS_PER_ATB / BLOCKS_PER_FTB + BITS_PER_BYTE * BLOCKS_PER_ATB * BYTES_PER_BLOCK);
  103. #else
  104. MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len / (1 + BITS_PER_BYTE / 2 * BYTES_PER_BLOCK);
  105. #endif
  106. MP_STATE_MEM(gc_alloc_table_start) = (byte *)start;
  107. #if MICROPY_ENABLE_FINALISER
  108. size_t gc_finaliser_table_byte_len = (MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
  109. MP_STATE_MEM(gc_finaliser_table_start) = MP_STATE_MEM(gc_alloc_table_start) + MP_STATE_MEM(gc_alloc_table_byte_len);
  110. #endif
  111. size_t gc_pool_block_len = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
  112. MP_STATE_MEM(gc_pool_start) = (byte *)end - gc_pool_block_len * BYTES_PER_BLOCK;
  113. MP_STATE_MEM(gc_pool_end) = end;
  114. #if MICROPY_ENABLE_FINALISER
  115. assert(MP_STATE_MEM(gc_pool_start) >= MP_STATE_MEM(gc_finaliser_table_start) + gc_finaliser_table_byte_len);
  116. #endif
  117. // clear ATBs
  118. memset(MP_STATE_MEM(gc_alloc_table_start), 0, MP_STATE_MEM(gc_alloc_table_byte_len));
  119. #if MICROPY_ENABLE_FINALISER
  120. // clear FTBs
  121. memset(MP_STATE_MEM(gc_finaliser_table_start), 0, gc_finaliser_table_byte_len);
  122. #endif
  123. // set last free ATB index to start of heap
  124. MP_STATE_MEM(gc_last_free_atb_index) = 0;
  125. // unlock the GC
  126. MP_STATE_MEM(gc_lock_depth) = 0;
  127. // allow auto collection
  128. MP_STATE_MEM(gc_auto_collect_enabled) = 1;
  129. #if MICROPY_GC_ALLOC_THRESHOLD
  130. // by default, maxuint for gc threshold, effectively turning gc-by-threshold off
  131. MP_STATE_MEM(gc_alloc_threshold) = (size_t)-1;
  132. MP_STATE_MEM(gc_alloc_amount) = 0;
  133. #endif
  134. #if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
  135. mp_thread_mutex_init(&MP_STATE_MEM(gc_mutex));
  136. #endif
  137. DEBUG_printf("GC layout:\n");
  138. DEBUG_printf(" alloc table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_alloc_table_start), MP_STATE_MEM(gc_alloc_table_byte_len), MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB);
  139. #if MICROPY_ENABLE_FINALISER
  140. DEBUG_printf(" finaliser table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_finaliser_table_start), gc_finaliser_table_byte_len, gc_finaliser_table_byte_len * BLOCKS_PER_FTB);
  141. #endif
  142. DEBUG_printf(" pool at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_pool_start), gc_pool_block_len * BYTES_PER_BLOCK, gc_pool_block_len);
  143. }
  144. void gc_lock(void) {
  145. GC_ENTER();
  146. MP_STATE_MEM(gc_lock_depth)++;
  147. GC_EXIT();
  148. }
  149. void gc_unlock(void) {
  150. GC_ENTER();
  151. MP_STATE_MEM(gc_lock_depth)--;
  152. GC_EXIT();
  153. }
  154. bool gc_is_locked(void) {
  155. return MP_STATE_MEM(gc_lock_depth) != 0;
  156. }
  157. // ptr should be of type void*
  158. #define VERIFY_PTR(ptr) ( \
  159. ((uintptr_t)(ptr) & (BYTES_PER_BLOCK - 1)) == 0 /* must be aligned on a block */ \
  160. && ptr >= (void *)MP_STATE_MEM(gc_pool_start) /* must be above start of pool */ \
  161. && ptr < (void *)MP_STATE_MEM(gc_pool_end) /* must be below end of pool */ \
  162. )
  163. #ifndef TRACE_MARK
  164. #if DEBUG_PRINT
  165. #define TRACE_MARK(block, ptr) DEBUG_printf("gc_mark(%p)\n", ptr)
  166. #else
  167. #define TRACE_MARK(block, ptr)
  168. #endif
  169. #endif
  170. // Take the given block as the topmost block on the stack. Check all it's
  171. // children: mark the unmarked child blocks and put those newly marked
  172. // blocks on the stack. When all children have been checked, pop off the
  173. // topmost block on the stack and repeat with that one.
  174. STATIC void gc_mark_subtree(size_t block) {
  175. // Start with the block passed in the argument.
  176. size_t sp = 0;
  177. for (;;) {
  178. // work out number of consecutive blocks in the chain starting with this one
  179. size_t n_blocks = 0;
  180. do {
  181. n_blocks += 1;
  182. } while (ATB_GET_KIND(block + n_blocks) == AT_TAIL);
  183. // check this block's children
  184. void **ptrs = (void **)PTR_FROM_BLOCK(block);
  185. for (size_t i = n_blocks * BYTES_PER_BLOCK / sizeof(void *); i > 0; i--, ptrs++) {
  186. void *ptr = *ptrs;
  187. if (VERIFY_PTR(ptr)) {
  188. // Mark and push this pointer
  189. size_t childblock = BLOCK_FROM_PTR(ptr);
  190. if (ATB_GET_KIND(childblock) == AT_HEAD) {
  191. // an unmarked head, mark it, and push it on gc stack
  192. TRACE_MARK(childblock, ptr);
  193. ATB_HEAD_TO_MARK(childblock);
  194. if (sp < MICROPY_ALLOC_GC_STACK_SIZE) {
  195. MP_STATE_MEM(gc_stack)[sp++] = childblock;
  196. } else {
  197. MP_STATE_MEM(gc_stack_overflow) = 1;
  198. }
  199. }
  200. }
  201. }
  202. // Are there any blocks on the stack?
  203. if (sp == 0) {
  204. break; // No, stack is empty, we're done.
  205. }
  206. // pop the next block off the stack
  207. block = MP_STATE_MEM(gc_stack)[--sp];
  208. }
  209. }
  210. STATIC void gc_deal_with_stack_overflow(void) {
  211. while (MP_STATE_MEM(gc_stack_overflow)) {
  212. MP_STATE_MEM(gc_stack_overflow) = 0;
  213. // scan entire memory looking for blocks which have been marked but not their children
  214. for (size_t block = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
  215. // trace (again) if mark bit set
  216. if (ATB_GET_KIND(block) == AT_MARK) {
  217. gc_mark_subtree(block);
  218. }
  219. }
  220. }
  221. }
  222. STATIC void gc_sweep(void) {
  223. #if MICROPY_PY_GC_COLLECT_RETVAL
  224. MP_STATE_MEM(gc_collected) = 0;
  225. #endif
  226. // free unmarked heads and their tails
  227. int free_tail = 0;
  228. for (size_t block = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
  229. switch (ATB_GET_KIND(block)) {
  230. case AT_HEAD:
  231. #if MICROPY_ENABLE_FINALISER
  232. if (FTB_GET(block)) {
  233. mp_obj_base_t *obj = (mp_obj_base_t *)PTR_FROM_BLOCK(block);
  234. if (obj->type != NULL) {
  235. // if the object has a type then see if it has a __del__ method
  236. mp_obj_t dest[2];
  237. mp_load_method_maybe(MP_OBJ_FROM_PTR(obj), MP_QSTR___del__, dest);
  238. if (dest[0] != MP_OBJ_NULL) {
  239. // load_method returned a method, execute it in a protected environment
  240. #if MICROPY_ENABLE_SCHEDULER
  241. mp_sched_lock();
  242. #endif
  243. mp_call_function_1_protected(dest[0], dest[1]);
  244. #if MICROPY_ENABLE_SCHEDULER
  245. mp_sched_unlock();
  246. #endif
  247. }
  248. }
  249. // clear finaliser flag
  250. FTB_CLEAR(block);
  251. }
  252. #endif
  253. free_tail = 1;
  254. DEBUG_printf("gc_sweep(%p)\n", PTR_FROM_BLOCK(block));
  255. #if MICROPY_PY_GC_COLLECT_RETVAL
  256. MP_STATE_MEM(gc_collected)++;
  257. #endif
  258. // fall through to free the head
  259. MP_FALLTHROUGH
  260. case AT_TAIL:
  261. if (free_tail) {
  262. ATB_ANY_TO_FREE(block);
  263. #if CLEAR_ON_SWEEP
  264. memset((void *)PTR_FROM_BLOCK(block), 0, BYTES_PER_BLOCK);
  265. #endif
  266. }
  267. break;
  268. case AT_MARK:
  269. ATB_MARK_TO_HEAD(block);
  270. free_tail = 0;
  271. break;
  272. }
  273. }
  274. }
  275. void gc_collect_start(void) {
  276. GC_ENTER();
  277. MP_STATE_MEM(gc_lock_depth)++;
  278. #if MICROPY_GC_ALLOC_THRESHOLD
  279. MP_STATE_MEM(gc_alloc_amount) = 0;
  280. #endif
  281. MP_STATE_MEM(gc_stack_overflow) = 0;
  282. // Trace root pointers. This relies on the root pointers being organised
  283. // correctly in the mp_state_ctx structure. We scan nlr_top, dict_locals,
  284. // dict_globals, then the root pointer section of mp_state_vm.
  285. void **ptrs = (void **)(void *)&mp_state_ctx;
  286. size_t root_start = offsetof(mp_state_ctx_t, thread.dict_locals);
  287. size_t root_end = offsetof(mp_state_ctx_t, vm.qstr_last_chunk);
  288. gc_collect_root(ptrs + root_start / sizeof(void *), (root_end - root_start) / sizeof(void *));
  289. #if MICROPY_ENABLE_PYSTACK
  290. // Trace root pointers from the Python stack.
  291. ptrs = (void **)(void *)MP_STATE_THREAD(pystack_start);
  292. gc_collect_root(ptrs, (MP_STATE_THREAD(pystack_cur) - MP_STATE_THREAD(pystack_start)) / sizeof(void *));
  293. #endif
  294. }
  295. void gc_collect_root(void **ptrs, size_t len) {
  296. for (size_t i = 0; i < len; i++) {
  297. void *ptr = ptrs[i];
  298. if (VERIFY_PTR(ptr)) {
  299. size_t block = BLOCK_FROM_PTR(ptr);
  300. if (ATB_GET_KIND(block) == AT_HEAD) {
  301. // An unmarked head: mark it, and mark all its children
  302. TRACE_MARK(block, ptr);
  303. ATB_HEAD_TO_MARK(block);
  304. gc_mark_subtree(block);
  305. }
  306. }
  307. }
  308. }
  309. void gc_collect_end(void) {
  310. gc_deal_with_stack_overflow();
  311. gc_sweep();
  312. MP_STATE_MEM(gc_last_free_atb_index) = 0;
  313. MP_STATE_MEM(gc_lock_depth)--;
  314. GC_EXIT();
  315. }
  316. void gc_sweep_all(void) {
  317. GC_ENTER();
  318. MP_STATE_MEM(gc_lock_depth)++;
  319. MP_STATE_MEM(gc_stack_overflow) = 0;
  320. gc_collect_end();
  321. }
  322. void gc_info(gc_info_t *info) {
  323. GC_ENTER();
  324. info->total = MP_STATE_MEM(gc_pool_end) - MP_STATE_MEM(gc_pool_start);
  325. info->used = 0;
  326. info->free = 0;
  327. info->max_free = 0;
  328. info->num_1block = 0;
  329. info->num_2block = 0;
  330. info->max_block = 0;
  331. bool finish = false;
  332. for (size_t block = 0, len = 0, len_free = 0; !finish;) {
  333. size_t kind = ATB_GET_KIND(block);
  334. switch (kind) {
  335. case AT_FREE:
  336. info->free += 1;
  337. len_free += 1;
  338. len = 0;
  339. break;
  340. case AT_HEAD:
  341. info->used += 1;
  342. len = 1;
  343. break;
  344. case AT_TAIL:
  345. info->used += 1;
  346. len += 1;
  347. break;
  348. case AT_MARK:
  349. // shouldn't happen
  350. break;
  351. }
  352. block++;
  353. finish = (block == MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB);
  354. // Get next block type if possible
  355. if (!finish) {
  356. kind = ATB_GET_KIND(block);
  357. }
  358. if (finish || kind == AT_FREE || kind == AT_HEAD) {
  359. if (len == 1) {
  360. info->num_1block += 1;
  361. } else if (len == 2) {
  362. info->num_2block += 1;
  363. }
  364. if (len > info->max_block) {
  365. info->max_block = len;
  366. }
  367. if (finish || kind == AT_HEAD) {
  368. if (len_free > info->max_free) {
  369. info->max_free = len_free;
  370. }
  371. len_free = 0;
  372. }
  373. }
  374. }
  375. info->used *= BYTES_PER_BLOCK;
  376. info->free *= BYTES_PER_BLOCK;
  377. GC_EXIT();
  378. }
  379. void *gc_alloc(size_t n_bytes, unsigned int alloc_flags) {
  380. bool has_finaliser = alloc_flags & GC_ALLOC_FLAG_HAS_FINALISER;
  381. size_t n_blocks = ((n_bytes + BYTES_PER_BLOCK - 1) & (~(BYTES_PER_BLOCK - 1))) / BYTES_PER_BLOCK;
  382. DEBUG_printf("gc_alloc(" UINT_FMT " bytes -> " UINT_FMT " blocks)\n", n_bytes, n_blocks);
  383. // check for 0 allocation
  384. if (n_blocks == 0) {
  385. return NULL;
  386. }
  387. GC_ENTER();
  388. // check if GC is locked
  389. if (MP_STATE_MEM(gc_lock_depth) > 0) {
  390. GC_EXIT();
  391. return NULL;
  392. }
  393. size_t i;
  394. size_t end_block;
  395. size_t start_block;
  396. size_t n_free;
  397. int collected = !MP_STATE_MEM(gc_auto_collect_enabled);
  398. #if MICROPY_GC_ALLOC_THRESHOLD
  399. if (!collected && MP_STATE_MEM(gc_alloc_amount) >= MP_STATE_MEM(gc_alloc_threshold)) {
  400. GC_EXIT();
  401. gc_collect();
  402. collected = 1;
  403. GC_ENTER();
  404. }
  405. #endif
  406. for (;;) {
  407. // look for a run of n_blocks available blocks
  408. n_free = 0;
  409. for (i = MP_STATE_MEM(gc_last_free_atb_index); i < MP_STATE_MEM(gc_alloc_table_byte_len); i++) {
  410. byte a = MP_STATE_MEM(gc_alloc_table_start)[i];
  411. // *FORMAT-OFF*
  412. if (ATB_0_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 0; goto found; } } else { n_free = 0; }
  413. if (ATB_1_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 1; goto found; } } else { n_free = 0; }
  414. if (ATB_2_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 2; goto found; } } else { n_free = 0; }
  415. if (ATB_3_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 3; goto found; } } else { n_free = 0; }
  416. // *FORMAT-ON*
  417. }
  418. GC_EXIT();
  419. // nothing found!
  420. if (collected) {
  421. return NULL;
  422. }
  423. DEBUG_printf("gc_alloc(" UINT_FMT "): no free mem, triggering GC\n", n_bytes);
  424. gc_collect();
  425. collected = 1;
  426. GC_ENTER();
  427. }
  428. // found, ending at block i inclusive
  429. found:
  430. // get starting and end blocks, both inclusive
  431. end_block = i;
  432. start_block = i - n_free + 1;
  433. // Set last free ATB index to block after last block we found, for start of
  434. // next scan. To reduce fragmentation, we only do this if we were looking
  435. // for a single free block, which guarantees that there are no free blocks
  436. // before this one. Also, whenever we free or shink a block we must check
  437. // if this index needs adjusting (see gc_realloc and gc_free).
  438. if (n_free == 1) {
  439. MP_STATE_MEM(gc_last_free_atb_index) = (i + 1) / BLOCKS_PER_ATB;
  440. }
  441. // mark first block as used head
  442. ATB_FREE_TO_HEAD(start_block);
  443. // mark rest of blocks as used tail
  444. // TODO for a run of many blocks can make this more efficient
  445. for (size_t bl = start_block + 1; bl <= end_block; bl++) {
  446. ATB_FREE_TO_TAIL(bl);
  447. }
  448. // get pointer to first block
  449. // we must create this pointer before unlocking the GC so a collection can find it
  450. void *ret_ptr = (void *)(MP_STATE_MEM(gc_pool_start) + start_block * BYTES_PER_BLOCK);
  451. DEBUG_printf("gc_alloc(%p)\n", ret_ptr);
  452. #if MICROPY_GC_ALLOC_THRESHOLD
  453. MP_STATE_MEM(gc_alloc_amount) += n_blocks;
  454. #endif
  455. GC_EXIT();
  456. #if MICROPY_GC_CONSERVATIVE_CLEAR
  457. // be conservative and zero out all the newly allocated blocks
  458. memset((byte *)ret_ptr, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK);
  459. #else
  460. // zero out the additional bytes of the newly allocated blocks
  461. // This is needed because the blocks may have previously held pointers
  462. // to the heap and will not be set to something else if the caller
  463. // doesn't actually use the entire block. As such they will continue
  464. // to point to the heap and may prevent other blocks from being reclaimed.
  465. memset((byte *)ret_ptr + n_bytes, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK - n_bytes);
  466. #endif
  467. #if MICROPY_ENABLE_FINALISER
  468. if (has_finaliser) {
  469. // clear type pointer in case it is never set
  470. ((mp_obj_base_t *)ret_ptr)->type = NULL;
  471. // set mp_obj flag only if it has a finaliser
  472. GC_ENTER();
  473. FTB_SET(start_block);
  474. GC_EXIT();
  475. }
  476. #else
  477. (void)has_finaliser;
  478. #endif
  479. #if EXTENSIVE_HEAP_PROFILING
  480. gc_dump_alloc_table();
  481. #endif
  482. return ret_ptr;
  483. }
  484. /*
  485. void *gc_alloc(mp_uint_t n_bytes) {
  486. return _gc_alloc(n_bytes, false);
  487. }
  488. void *gc_alloc_with_finaliser(mp_uint_t n_bytes) {
  489. return _gc_alloc(n_bytes, true);
  490. }
  491. */
  492. // force the freeing of a piece of memory
  493. // TODO: freeing here does not call finaliser
  494. void gc_free(void *ptr) {
  495. GC_ENTER();
  496. if (MP_STATE_MEM(gc_lock_depth) > 0) {
  497. // TODO how to deal with this error?
  498. GC_EXIT();
  499. return;
  500. }
  501. DEBUG_printf("gc_free(%p)\n", ptr);
  502. if (ptr == NULL) {
  503. GC_EXIT();
  504. } else {
  505. // get the GC block number corresponding to this pointer
  506. assert(VERIFY_PTR(ptr));
  507. size_t block = BLOCK_FROM_PTR(ptr);
  508. assert(ATB_GET_KIND(block) == AT_HEAD);
  509. #if MICROPY_ENABLE_FINALISER
  510. FTB_CLEAR(block);
  511. #endif
  512. // set the last_free pointer to this block if it's earlier in the heap
  513. if (block / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) {
  514. MP_STATE_MEM(gc_last_free_atb_index) = block / BLOCKS_PER_ATB;
  515. }
  516. // free head and all of its tail blocks
  517. do {
  518. ATB_ANY_TO_FREE(block);
  519. block += 1;
  520. } while (ATB_GET_KIND(block) == AT_TAIL);
  521. GC_EXIT();
  522. #if EXTENSIVE_HEAP_PROFILING
  523. gc_dump_alloc_table();
  524. #endif
  525. }
  526. }
  527. size_t gc_nbytes(const void *ptr) {
  528. GC_ENTER();
  529. if (VERIFY_PTR(ptr)) {
  530. size_t block = BLOCK_FROM_PTR(ptr);
  531. if (ATB_GET_KIND(block) == AT_HEAD) {
  532. // work out number of consecutive blocks in the chain starting with this on
  533. size_t n_blocks = 0;
  534. do {
  535. n_blocks += 1;
  536. } while (ATB_GET_KIND(block + n_blocks) == AT_TAIL);
  537. GC_EXIT();
  538. return n_blocks * BYTES_PER_BLOCK;
  539. }
  540. }
  541. // invalid pointer
  542. GC_EXIT();
  543. return 0;
  544. }
  545. #if 0
  546. // old, simple realloc that didn't expand memory in place
  547. void *gc_realloc(void *ptr, mp_uint_t n_bytes) {
  548. mp_uint_t n_existing = gc_nbytes(ptr);
  549. if (n_bytes <= n_existing) {
  550. return ptr;
  551. } else {
  552. bool has_finaliser;
  553. if (ptr == NULL) {
  554. has_finaliser = false;
  555. } else {
  556. #if MICROPY_ENABLE_FINALISER
  557. has_finaliser = FTB_GET(BLOCK_FROM_PTR((mp_uint_t)ptr));
  558. #else
  559. has_finaliser = false;
  560. #endif
  561. }
  562. void *ptr2 = gc_alloc(n_bytes, has_finaliser);
  563. if (ptr2 == NULL) {
  564. return ptr2;
  565. }
  566. memcpy(ptr2, ptr, n_existing);
  567. gc_free(ptr);
  568. return ptr2;
  569. }
  570. }
  571. #else // Alternative gc_realloc impl
  572. void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
  573. // check for pure allocation
  574. if (ptr_in == NULL) {
  575. return gc_alloc(n_bytes, false);
  576. }
  577. // check for pure free
  578. if (n_bytes == 0) {
  579. gc_free(ptr_in);
  580. return NULL;
  581. }
  582. void *ptr = ptr_in;
  583. GC_ENTER();
  584. if (MP_STATE_MEM(gc_lock_depth) > 0) {
  585. GC_EXIT();
  586. return NULL;
  587. }
  588. // get the GC block number corresponding to this pointer
  589. assert(VERIFY_PTR(ptr));
  590. size_t block = BLOCK_FROM_PTR(ptr);
  591. assert(ATB_GET_KIND(block) == AT_HEAD);
  592. // compute number of new blocks that are requested
  593. size_t new_blocks = (n_bytes + BYTES_PER_BLOCK - 1) / BYTES_PER_BLOCK;
  594. // Get the total number of consecutive blocks that are already allocated to
  595. // this chunk of memory, and then count the number of free blocks following
  596. // it. Stop if we reach the end of the heap, or if we find enough extra
  597. // free blocks to satisfy the realloc. Note that we need to compute the
  598. // total size of the existing memory chunk so we can correctly and
  599. // efficiently shrink it (see below for shrinking code).
  600. size_t n_free = 0;
  601. size_t n_blocks = 1; // counting HEAD block
  602. size_t max_block = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
  603. for (size_t bl = block + n_blocks; bl < max_block; bl++) {
  604. byte block_type = ATB_GET_KIND(bl);
  605. if (block_type == AT_TAIL) {
  606. n_blocks++;
  607. continue;
  608. }
  609. if (block_type == AT_FREE) {
  610. n_free++;
  611. if (n_blocks + n_free >= new_blocks) {
  612. // stop as soon as we find enough blocks for n_bytes
  613. break;
  614. }
  615. continue;
  616. }
  617. break;
  618. }
  619. // return original ptr if it already has the requested number of blocks
  620. if (new_blocks == n_blocks) {
  621. GC_EXIT();
  622. return ptr_in;
  623. }
  624. // check if we can shrink the allocated area
  625. if (new_blocks < n_blocks) {
  626. // free unneeded tail blocks
  627. for (size_t bl = block + new_blocks, count = n_blocks - new_blocks; count > 0; bl++, count--) {
  628. ATB_ANY_TO_FREE(bl);
  629. }
  630. // set the last_free pointer to end of this block if it's earlier in the heap
  631. if ((block + new_blocks) / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) {
  632. MP_STATE_MEM(gc_last_free_atb_index) = (block + new_blocks) / BLOCKS_PER_ATB;
  633. }
  634. GC_EXIT();
  635. #if EXTENSIVE_HEAP_PROFILING
  636. gc_dump_alloc_table();
  637. #endif
  638. return ptr_in;
  639. }
  640. // check if we can expand in place
  641. if (new_blocks <= n_blocks + n_free) {
  642. // mark few more blocks as used tail
  643. for (size_t bl = block + n_blocks; bl < block + new_blocks; bl++) {
  644. assert(ATB_GET_KIND(bl) == AT_FREE);
  645. ATB_FREE_TO_TAIL(bl);
  646. }
  647. GC_EXIT();
  648. #if MICROPY_GC_CONSERVATIVE_CLEAR
  649. // be conservative and zero out all the newly allocated blocks
  650. memset((byte *)ptr_in + n_blocks * BYTES_PER_BLOCK, 0, (new_blocks - n_blocks) * BYTES_PER_BLOCK);
  651. #else
  652. // zero out the additional bytes of the newly allocated blocks (see comment above in gc_alloc)
  653. memset((byte *)ptr_in + n_bytes, 0, new_blocks * BYTES_PER_BLOCK - n_bytes);
  654. #endif
  655. #if EXTENSIVE_HEAP_PROFILING
  656. gc_dump_alloc_table();
  657. #endif
  658. return ptr_in;
  659. }
  660. #if MICROPY_ENABLE_FINALISER
  661. bool ftb_state = FTB_GET(block);
  662. #else
  663. bool ftb_state = false;
  664. #endif
  665. GC_EXIT();
  666. if (!allow_move) {
  667. // not allowed to move memory block so return failure
  668. return NULL;
  669. }
  670. // can't resize inplace; try to find a new contiguous chain
  671. void *ptr_out = gc_alloc(n_bytes, ftb_state);
  672. // check that the alloc succeeded
  673. if (ptr_out == NULL) {
  674. return NULL;
  675. }
  676. DEBUG_printf("gc_realloc(%p -> %p)\n", ptr_in, ptr_out);
  677. memcpy(ptr_out, ptr_in, n_blocks * BYTES_PER_BLOCK);
  678. gc_free(ptr_in);
  679. return ptr_out;
  680. }
  681. #endif // Alternative gc_realloc impl
  682. void gc_dump_info(void) {
  683. gc_info_t info;
  684. gc_info(&info);
  685. mp_printf(&mp_plat_print, "GC: total: %u, used: %u, free: %u\n",
  686. (uint)info.total, (uint)info.used, (uint)info.free);
  687. mp_printf(&mp_plat_print, " No. of 1-blocks: %u, 2-blocks: %u, max blk sz: %u, max free sz: %u\n",
  688. (uint)info.num_1block, (uint)info.num_2block, (uint)info.max_block, (uint)info.max_free);
  689. }
// Dump the entire GC allocation table as an ASCII map, one character per
// heap block, for debugging heap layout and fragmentation.
// Legend: '.' free, '=' tail block of a multi-block allocation, 'm' marked
// (mid-collection), and for head blocks a letter indicating the object type
// stored there ('T' tuple, 'L' list, 'D' dict, 'S' str/bytes, 'A' (byte)array,
// 'F' float, 'B' bytecode function, 'M' module, 'h' anything else).
// Holds the GC lock for the whole dump so the table cannot change underneath.
void gc_dump_alloc_table(void) {
    GC_ENTER();
    static const size_t DUMP_BYTES_PER_LINE = 64;
    #if !EXTENSIVE_HEAP_PROFILING
    // When comparing heap output we don't want to print the starting
    // pointer of the heap because it changes from run to run.
    mp_printf(&mp_plat_print, "GC memory layout; from %p:", MP_STATE_MEM(gc_pool_start));
    #endif
    for (size_t bl = 0; bl < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; bl++) {
        if (bl % DUMP_BYTES_PER_LINE == 0) {
            // a new line of blocks
            {
                // check if this line contains only free blocks
                size_t bl2 = bl;
                while (bl2 < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB && ATB_GET_KIND(bl2) == AT_FREE) {
                    bl2++;
                }
                if (bl2 - bl >= 2 * DUMP_BYTES_PER_LINE) {
                    // there are at least 2 lines containing only free blocks, so abbreviate their printing
                    mp_printf(&mp_plat_print, "\n (%u lines all free)", (uint)(bl2 - bl) / DUMP_BYTES_PER_LINE);
                    // resume the dump at the start of the line containing the first non-free block
                    bl = bl2 & (~(DUMP_BYTES_PER_LINE - 1));
                    if (bl >= MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB) {
                        // got to end of heap
                        break;
                    }
                }
            }
            // print header for new line of blocks
            // (the cast to uint32_t is for 16-bit ports)
            // mp_printf(&mp_plat_print, "\n%05x: ", (uint)(PTR_FROM_BLOCK(bl) & (uint32_t)0xfffff));
            mp_printf(&mp_plat_print, "\n%05x: ", (uint)((bl * BYTES_PER_BLOCK) & (uint32_t)0xfffff));
        }
        int c = ' ';
        switch (ATB_GET_KIND(bl)) {
            case AT_FREE:
                c = '.';
                break;
            /* this prints out if the object is reachable from BSS or STACK (for unix only)
            case AT_HEAD: {
                c = 'h';
                void **ptrs = (void**)(void*)&mp_state_ctx;
                mp_uint_t len = offsetof(mp_state_ctx_t, vm.stack_top) / sizeof(mp_uint_t);
                for (mp_uint_t i = 0; i < len; i++) {
                    mp_uint_t ptr = (mp_uint_t)ptrs[i];
                    if (VERIFY_PTR(ptr) && BLOCK_FROM_PTR(ptr) == bl) {
                        c = 'B';
                        break;
                    }
                }
                if (c == 'h') {
                    ptrs = (void**)&c;
                    len = ((mp_uint_t)MP_STATE_THREAD(stack_top) - (mp_uint_t)&c) / sizeof(mp_uint_t);
                    for (mp_uint_t i = 0; i < len; i++) {
                        mp_uint_t ptr = (mp_uint_t)ptrs[i];
                        if (VERIFY_PTR(ptr) && BLOCK_FROM_PTR(ptr) == bl) {
                            c = 'S';
                            break;
                        }
                    }
                }
                break;
            }
            */
            /* this prints the uPy object type of the head block */
            case AT_HEAD: {
                // classify the object by peeking at its first word, which for
                // a MicroPython object is the pointer to its type structure
                void **ptr = (void **)(MP_STATE_MEM(gc_pool_start) + bl * BYTES_PER_BLOCK);
                if (*ptr == &mp_type_tuple) {
                    c = 'T';
                } else if (*ptr == &mp_type_list) {
                    c = 'L';
                } else if (*ptr == &mp_type_dict) {
                    c = 'D';
                } else if (*ptr == &mp_type_str || *ptr == &mp_type_bytes) {
                    c = 'S';
                }
                #if MICROPY_PY_BUILTINS_BYTEARRAY
                else if (*ptr == &mp_type_bytearray) {
                    c = 'A';
                }
                #endif
                #if MICROPY_PY_ARRAY
                else if (*ptr == &mp_type_array) {
                    c = 'A';
                }
                #endif
                #if MICROPY_PY_BUILTINS_FLOAT
                else if (*ptr == &mp_type_float) {
                    c = 'F';
                }
                #endif
                else if (*ptr == &mp_type_fun_bc) {
                    c = 'B';
                } else if (*ptr == &mp_type_module) {
                    c = 'M';
                } else {
                    // not a recognized type pointer; could be a non-object
                    // allocation (e.g. qstr data) or any other object type
                    c = 'h';
                    #if 0
                    // This code prints "Q" for qstr-pool data, and "q" for qstr-str
                    // data. It can be useful to see how qstrs are being allocated,
                    // but is disabled by default because it is very slow.
                    for (qstr_pool_t *pool = MP_STATE_VM(last_pool); c == 'h' && pool != NULL; pool = pool->prev) {
                        if ((qstr_pool_t *)ptr == pool) {
                            c = 'Q';
                            break;
                        }
                        for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
                            if ((const byte *)ptr == *q) {
                                c = 'q';
                                break;
                            }
                        }
                    }
                    #endif
                }
                break;
            }
            case AT_TAIL:
                c = '=';
                break;
            case AT_MARK:
                c = 'm';
                break;
        }
        mp_printf(&mp_plat_print, "%c", c);
    }
    mp_print_str(&mp_plat_print, "\n");
    GC_EXIT();
}
#if 0
// For testing the GC functions
// Manual smoke test: builds a tiny heap, makes a web of allocations (some
// reachable from ptrs[], some garbage), then runs a collection and dumps the
// allocation table before and after. Compiled out by default.
void gc_test(void) {
    mp_uint_t len = 500;
    mp_uint_t *heap = malloc(len);
    gc_init(heap, heap + len / sizeof(mp_uint_t));
    void *ptrs[100];
    {
        // build a small reachable graph: p2 -> p -> {four blocks},
        // rooted via ptrs[0] which is passed to gc_collect_root below
        mp_uint_t **p = gc_alloc(16, false);
        p[0] = gc_alloc(64, false);
        p[1] = gc_alloc(1, false);
        p[2] = gc_alloc(1, false);
        p[3] = gc_alloc(1, false);
        mp_uint_t ***p2 = gc_alloc(16, false);
        p2[0] = p;
        p2[1] = p;
        ptrs[0] = p2;
    }
    // make some allocations that are never stored in ptrs[], i.e. garbage
    // the collector should reclaim
    for (int i = 0; i < 25; i += 2) {
        mp_uint_t *p = gc_alloc(i, false);
        printf("p=%p\n", p);
        if (i & 3) {
            // ptrs[i] = p;
        }
    }
    printf("Before GC:\n");
    gc_dump_alloc_table();
    printf("Starting GC...\n");
    gc_collect_start();
    gc_collect_root(ptrs, sizeof(ptrs) / sizeof(void *));
    gc_collect_end();
    printf("After GC:\n");
    gc_dump_alloc_table();
}
#endif
  853. #endif // MICROPY_ENABLE_GC