Przeglądaj źródła

esp-rom: create a patch of tlsf_check() for target(s) supporting ROM implementation of TLSF

The tlsf implementation in the ROM does not provide a mechanism
to register a callback to be called by tlsf_check().

This commit creates a patch of the tlsf implementation that provides
a definition of the function used to register the callback and adds
the call to this callback in tlsf_check().

This patch is only compiled for target(s) with ESP_ROM_HAS_HEAP_TLSF
set and ESP_ROM_TLSF_CHECK_PATCH set. For all the other configurations
the environment remains unchanged by those modifications.
Guillaume Souchere 3 lat temu
rodzic
commit
b9abad7a89

+ 5 - 0
components/esp_rom/CMakeLists.txt

@@ -20,6 +20,11 @@ else()
                         "patches/esp_rom_spiflash.c"
                         "patches/esp_rom_regi2c.c"
                         "patches/esp_rom_efuse.c")
+
+    if(CONFIG_ESP_ROM_HAS_HEAP_TLSF AND CONFIG_ESP_ROM_TLSF_CHECK_PATCH)
+        list(APPEND sources "patches/esp_rom_tlsf.c")
+    endif()
+
     list(APPEND private_required_comp soc hal)
 endif()
 

+ 4 - 0
components/esp_rom/esp32c2/Kconfig.soc_caps.in

@@ -38,3 +38,7 @@ config ESP_ROM_HAS_HAL_SYSTIMER
 config ESP_ROM_HAS_HEAP_TLSF
     bool
     default y
+
+config ESP_ROM_TLSF_CHECK_PATCH
+    bool
+    default y

+ 1 - 0
components/esp_rom/esp32c2/esp_rom_caps.h

@@ -15,3 +15,4 @@
 #define ESP_ROM_HAS_HAL_WDT                 (1) // ROM has the implementation of Watchdog HAL driver
 #define ESP_ROM_HAS_HAL_SYSTIMER            (1) // ROM has the implementation of Systimer HAL driver
 #define ESP_ROM_HAS_HEAP_TLSF               (1) // ROM has the implementation of the tlsf and multi-heap library
+#define ESP_ROM_TLSF_CHECK_PATCH            (1) // ROM tlsf_check() lacks the poison-check hook, so a patched version is compiled instead

+ 9 - 0
components/esp_rom/include/esp32c2/rom/tlsf.h

@@ -18,6 +18,15 @@ extern "C" {
  */
 void tlsf_poison_fill_pfunc_set(void *pfunc);
 
+/*!
+ * @brief Register the callback invoked by tlsf_check() to verify the
+ * content of a memory region when heap poisoning is configured.
+ *
+ * @param pfunc The callback function that tlsf_check() triggers to check
+ * the content of a memory region.
+ */
+void tlsf_poison_check_pfunc_set(void *pfunc);
+
 #ifdef __cplusplus
 }
 #endif

+ 240 - 0
components/esp_rom/patches/esp_rom_tlsf.c

@@ -0,0 +1,240 @@
+/*
+ * SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/*
+ * This file is a patch for the tlsf implementation stored in ROM:
+ * - tlsf_check() now calls a hook, giving the user the possibility
+ * to implement specific checks on the memory of every free block.
+ * - The function tlsf_poison_check_pfunc_set() was added to allow the user to
+ * register the hook function called in tlsf_check().
+ */
+
+#include <stddef.h>
+#include <stdbool.h>
+
+#include "esp_rom_caps.h"
+#include "rom/tlsf.h"
+
+/* ----------------------------------------------------------------
+ * Bring certain inline functions, macro and structures from the
+ * tlsf ROM implementation to be able to compile the patch.
+ * ---------------------------------------------------------------- */
+
+#define tlsf_cast(t, exp)	((t) (exp))
+
+/* Configuration constants duplicated from the ROM tlsf implementation so
+ * that this patch compiles against the exact same free-list layout. */
+enum tlsf_config {
+    /* log2 of number of linear subdivisions of block sizes. Larger
+    ** values require more memory in the control structure. Values of
+    ** 4 or 5 are typical.
+    */
+    SL_INDEX_COUNT_LOG2 = 5,
+
+    /* All allocation sizes and addresses are aligned to 4 bytes. */
+    ALIGN_SIZE_LOG2 = 2,
+    ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),
+
+    /*
+    ** We support allocations of sizes up to (1 << FL_INDEX_MAX) bits.
+    ** However, because we linearly subdivide the second-level lists, and
+    ** our minimum size granularity is 4 bytes, it doesn't make sense to
+    ** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
+    ** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
+    ** trying to split size ranges into more slots than we have available.
+    ** Instead, we calculate the minimum threshold size, and place all
+    ** blocks below that size into the 0th first-level list.
+    */
+
+    /* Fix the value of FL_INDEX_MAX to match the value that is defined
+     * in the ROM implementation. */
+    FL_INDEX_MAX = 18, // Each pool can have up to 256 KB
+
+    SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
+    FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
+    FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),
+
+    SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
+};
+
+/* Flag bits stored in the two low bits of block_header_t::size, plus
+ * header-layout constants derived from it (identical to the ROM code). */
+#define block_header_free_bit  (1 << 0)
+#define block_header_prev_free_bit  (1 << 1)
+#define block_header_overhead  (sizeof(size_t))
+#define block_start_offset (offsetof(block_header_t, size) + sizeof(size_t))
+#define block_size_min  (sizeof(block_header_t) - sizeof(block_header_t*))
+
+typedef ptrdiff_t tlsfptr_t;
+
+/* Block header, laid out exactly as in the ROM tlsf implementation.
+ * The two low bits of 'size' carry the free / prev-free flags
+ * (block_header_free_bit / block_header_prev_free_bit). */
+typedef struct block_header_t
+{
+    /* Points to the previous physical block. */
+    struct block_header_t* prev_phys_block;
+
+    /* The size of this block, excluding the block header. */
+    size_t size;
+
+    /* Next and previous free blocks; only meaningful while the block is
+     * free (they overlap the user data of an allocated block). */
+    struct block_header_t* next_free;
+    struct block_header_t* prev_free;
+} block_header_t;
+
+/* The TLSF control structure. The field layout must match the ROM build
+ * exactly, since tlsf_check() receives a pointer created by ROM code. */
+typedef struct control_t
+{
+    /* Empty lists point at this block to indicate they are free. */
+    block_header_t block_null;
+
+    /* Bitmaps for free lists. */
+    unsigned int fl_bitmap;
+    unsigned int sl_bitmap[FL_INDEX_COUNT];
+
+    /* Head of free lists. */
+    block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
+} control_t;
+
+/* Find-last-set: index of the highest set bit in 'word', or -1 if word == 0. */
+static inline __attribute__((__always_inline__)) int tlsf_fls(unsigned int word)
+{
+    const int bit = word ? 32 - __builtin_clz(word) : 0;
+    return bit - 1;
+}
+
+/* Block size with the free / prev-free flag bits masked out of 'size'. */
+static inline __attribute__((__always_inline__)) size_t block_size(const block_header_t* block)
+{
+    return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
+}
+
+/* Nonzero when this block is marked free. */
+static inline __attribute__((__always_inline__)) int block_is_free(const block_header_t* block)
+{
+    return tlsf_cast(int, block->size & block_header_free_bit);
+}
+
+/* Nonzero when the previous physical block is marked free. */
+static inline __attribute__((__always_inline__)) int block_is_prev_free(const block_header_t* block)
+{
+    return tlsf_cast(int, block->size & block_header_prev_free_bit);
+}
+
+/* Return the block header located 'size' bytes past 'ptr'. */
+static inline __attribute__((__always_inline__)) block_header_t* offset_to_block(const void* ptr, size_t size)
+{
+    return tlsf_cast(block_header_t*, tlsf_cast(tlsfptr_t, ptr) + size);
+}
+
+/* Convert a block header to the user pointer (start of the usable area,
+ * block_start_offset bytes past the header start). */
+static inline __attribute__((__always_inline__)) void* block_to_ptr(const block_header_t* block)
+{
+    return tlsf_cast(void*,
+        tlsf_cast(unsigned char*, block) + block_start_offset);
+}
+
+/* Return the next physical block, derived from this block's size. */
+static inline __attribute__((__always_inline__)) block_header_t* block_next(const block_header_t* block)
+{
+    block_header_t* next = offset_to_block(block_to_ptr(block),
+        block_size(block) - block_header_overhead);
+    return next;
+}
+
+/* Compute the first-level (*fli) and second-level (*sli) free-list indices
+ * for a block of the given size; small blocks all map to first level 0. */
+static inline __attribute__((__always_inline__)) void mapping_insert(size_t size, int* fli, int* sli)
+{
+    int fl, sl;
+    if (size < SMALL_BLOCK_SIZE)
+    {
+        /* Store small blocks in first list. */
+        fl = 0;
+        sl = tlsf_cast(int, size) >> 2;
+    }
+    else
+    {
+        fl = tlsf_fls(size);
+        sl = tlsf_cast(int, size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
+        fl -= (FL_INDEX_SHIFT - 1);
+    }
+    *fli = fl;
+    *sli = sl;
+}
+
+/* ----------------------------------------------------------------
+ * End of the environment necessary to compile and link the patch
+ * defined below
+ * ---------------------------------------------------------------- */
+
+/* Signature of the user-registered poison-check callback: receives the start
+ * and size of a block's usable memory, whether the block is free, and whether
+ * errors should be printed; returns true when the region content is valid
+ * (a false return decrements the tlsf_check() status). */
+typedef bool (*poison_check_pfunc_t)(void *start, size_t size, bool is_free, bool print_errors);
+
+/* Callback invoked by tlsf_check() on every free block; NULL when unset. */
+static poison_check_pfunc_t s_poison_check_region = NULL;
+
+/*!
+ * @brief Register the callback that tlsf_check() calls to verify the
+ * memory content of every free block.
+ *
+ * @param pfunc Callback of type poison_check_pfunc_t (passed as void *).
+ */
+void tlsf_poison_check_pfunc_set(void *pfunc)
+{
+    s_poison_check_region = (poison_check_pfunc_t)pfunc;
+}
+
+/* Count a failed check instead of asserting: each failure decrements status. */
+#define tlsf_insist_no_assert(x) { if (!(x)) { status--; } }
+
+/*!
+ * @brief Walk the TLSF control structure and verify its integrity.
+ *
+ * In addition to the free-list/bitmap consistency checks of the ROM
+ * implementation, this patched version calls the callback registered via
+ * tlsf_poison_check_pfunc_set() (if any) on the usable area of every free
+ * block, so that heap-poisoning patterns can be verified.
+ *
+ * @param tlsf Pointer to the TLSF control structure.
+ * @return 0 on success, the negated number of failed checks otherwise.
+ */
+int tlsf_check(void* tlsf)
+{
+    int i, j;
+
+    control_t* control = tlsf_cast(control_t*, tlsf);
+    int status = 0;
+
+    /* Check that the free lists and bitmaps are accurate. */
+    for (i = 0; i < FL_INDEX_COUNT; ++i)
+    {
+        for (j = 0; j < SL_INDEX_COUNT; ++j)
+        {
+            const int fl_map = control->fl_bitmap & (1 << i);
+            const int sl_list = control->sl_bitmap[i];
+            const int sl_map = sl_list & (1 << j);
+            const block_header_t* block = control->blocks[i][j];
+
+            /* Check that first- and second-level lists agree. */
+            if (!fl_map)
+            {
+                tlsf_insist_no_assert(!sl_map && "second-level map must be null");
+            }
+
+            if (!sl_map)
+            {
+                tlsf_insist_no_assert(block == &control->block_null && "block list must be null");
+                continue;
+            }
+
+            /* Check that there is at least one free block. */
+            tlsf_insist_no_assert(sl_list && "no free blocks in second-level map");
+            tlsf_insist_no_assert(block != &control->block_null && "block should not be null");
+
+            while (block != &control->block_null)
+            {
+                int fli, sli;
+                const bool is_block_free = block_is_free(block);
+                tlsf_insist_no_assert(is_block_free && "block should be free");
+                tlsf_insist_no_assert(!block_is_prev_free(block) && "blocks should have coalesced");
+                tlsf_insist_no_assert(!block_is_free(block_next(block)) && "blocks should have coalesced");
+                tlsf_insist_no_assert(block_is_prev_free(block_next(block)) && "block should be free");
+                tlsf_insist_no_assert(block_size(block) >= block_size_min && "block not minimum size");
+
+                mapping_insert(block_size(block), &fli, &sli);
+                tlsf_insist_no_assert(fli == i && sli == j && "block size indexed in wrong list");
+
+                /* block_size(block) returns the size of the usable memory when the block is allocated.
+                 * As the block under test is free, we need to subtract from the block size the next_free
+                 * and prev_free fields of the block header as they are not a part of the usable memory
+                 * when the block is free. In addition, we also need to subtract the size of prev_phys_block
+                 * as this field is in fact part of the current free block and not part of the next (allocated)
+                 * block. Check the comments in block_split function for more details.
+                 */
+                const size_t actual_free_block_size = block_size(block)
+                                                        - offsetof(block_header_t, next_free)
+                                                        - block_header_overhead;
+
+                if (s_poison_check_region != NULL) {
+                    tlsf_insist_no_assert(s_poison_check_region((char *)block + sizeof(block_header_t),
+                                                    actual_free_block_size, is_block_free, true /* print errors */));
+                }
+
+                /* Advance only after the current block has been fully checked.
+                 * Advancing first would run the poison check on the successor
+                 * (with the predecessor's free flag), skip the last free block
+                 * of each list, and finally operate on &control->block_null. */
+                block = block->next_free;
+            }
+        }
+    }
+
+    return status;
+}
+
+#undef tlsf_insist_no_assert

+ 3 - 2
components/heap/multi_heap_poisoning.c

@@ -25,8 +25,8 @@
 #include "tlsf.h"
 #else
 /* Header containing the declaration of tlsf_poison_fill_pfunc_set()
- * used to register multi_heap_internal_poison_fill_region() as a
- * callback to fill memory region with given patterns in the heap
+ * and tlsf_poison_check_pfunc_set() used to register callbacks to
+ * fill and check memory region with given patterns in the heap
  * components.
  */
 #include "rom/tlsf.h"
@@ -361,6 +361,7 @@ multi_heap_handle_t multi_heap_register(void *start, size_t size)
 #endif
 #ifdef CONFIG_HEAP_TLSF_USE_ROM_IMPL
     tlsf_poison_fill_pfunc_set(multi_heap_internal_poison_fill_region);
+    tlsf_poison_check_pfunc_set(multi_heap_internal_check_block_poisoning);
 #endif
     return multi_heap_register_impl(start, size);
 }