Просмотр исходного кода

apptrace: Adds ESP32-C3 support

Alexey Gerenkov 4 года назад
Родитель
Commit
20fd09728f

+ 4 - 0
components/app_trace/CMakeLists.txt

@@ -16,6 +16,10 @@ if(CONFIG_APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE)
         list(APPEND srcs
             "port/xtensa/port.c")
     endif()
+    if(CONFIG_IDF_TARGET_ARCH_RISCV)
+        list(APPEND srcs
+            "port/riscv/port.c")
+    endif()
 endif()
 
 if(CONFIG_APPTRACE_SV_ENABLE)

+ 1 - 1
components/app_trace/Kconfig

@@ -68,7 +68,7 @@ menu "Application Level Tracing"
         depends on APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE && !APPTRACE_DEST_TRAX
         default 16384
         help
-            Size of the memory buffer for trace datats in bytes.
+            Size of the memory buffer for trace data in bytes.
 
     config APPTRACE_PENDING_DATA_SIZE_MAX
         int "Size of the pending data buffer"

+ 374 - 0
components/app_trace/port/riscv/port.c

@@ -0,0 +1,374 @@
+#include "esp_log.h"
+#include "esp_app_trace_membufs_proto.h"
+#include "esp_app_trace_port.h"
+
+/** RISCV HW transport data */
+typedef struct {
+    uint8_t                             inited; // initialization state flags for every core
+#if CONFIG_APPTRACE_LOCK_ENABLE
+    esp_apptrace_lock_t                 lock;   // sync lock
+#endif
+    esp_apptrace_membufs_proto_data_t   membufs;
+} esp_apptrace_riscv_data_t;
+
+/** RISCV memory host iface control block */
+typedef struct {
+    uint32_t                    ctrl;
+    // - Guard field. If this register is not zero then the CPU is changing this struct, and
+    //   this field holds the address of the instruction the application will execute when the CPU finishes those modifications.
+    uint32_t                    stat;
+    esp_apptrace_mem_block_t *  mem_blocks;
+} esp_apptrace_riscv_ctrl_block_t;
+
+#define RISCV_APPTRACE_SYSNR    0x64
+
+#define ESP_APPTRACE_RISCV_BLOCK_LEN_MSK         0x7FFFUL
+#define ESP_APPTRACE_RISCV_BLOCK_LEN(_l_)        ((_l_) & ESP_APPTRACE_RISCV_BLOCK_LEN_MSK)
+#define ESP_APPTRACE_RISCV_BLOCK_LEN_GET(_v_)    ((_v_) & ESP_APPTRACE_RISCV_BLOCK_LEN_MSK)
+#define ESP_APPTRACE_RISCV_BLOCK_ID_MSK          0x7FUL
+#define ESP_APPTRACE_RISCV_BLOCK_ID(_id_)        (((_id_) & ESP_APPTRACE_RISCV_BLOCK_ID_MSK) << 15)
+#define ESP_APPTRACE_RISCV_BLOCK_ID_GET(_v_)     (((_v_) >> 15) & ESP_APPTRACE_RISCV_BLOCK_ID_MSK)
+#define ESP_APPTRACE_RISCV_HOST_DATA             (1 << 22)
+#define ESP_APPTRACE_RISCV_HOST_CONNECT          (1 << 23)
+
+#define ESP_APPTRACE_RISCV_INITED(_hw_)          ((_hw_)->inited & (1 << 0/*cpu_hal_get_core_id()*/))
+
+static esp_err_t esp_apptrace_riscv_init(esp_apptrace_riscv_data_t *hw_data);
+static esp_err_t esp_apptrace_riscv_flush(esp_apptrace_riscv_data_t *hw_data, esp_apptrace_tmo_t *tmo);
+static esp_err_t esp_apptrace_riscv_flush_nolock(esp_apptrace_riscv_data_t *hw_data, uint32_t min_sz, esp_apptrace_tmo_t *tmo);
+static uint8_t *esp_apptrace_riscv_up_buffer_get(esp_apptrace_riscv_data_t *hw_data, uint32_t size, esp_apptrace_tmo_t *tmo);
+static esp_err_t esp_apptrace_riscv_up_buffer_put(esp_apptrace_riscv_data_t *hw_data, uint8_t *ptr, esp_apptrace_tmo_t *tmo);
+static void esp_apptrace_riscv_down_buffer_config(esp_apptrace_riscv_data_t *hw_data, uint8_t *buf, uint32_t size);
+static uint8_t *esp_apptrace_riscv_down_buffer_get(esp_apptrace_riscv_data_t *hw_data, uint32_t *size, esp_apptrace_tmo_t *tmo);
+static esp_err_t esp_apptrace_riscv_down_buffer_put(esp_apptrace_riscv_data_t *hw_data, uint8_t *ptr, esp_apptrace_tmo_t *tmo);
+static bool esp_apptrace_riscv_host_is_connected(esp_apptrace_riscv_data_t *hw_data);
+static esp_err_t esp_apptrace_riscv_buffer_swap_start(uint32_t curr_block_id);
+static esp_err_t esp_apptrace_riscv_buffer_swap(uint32_t new_block_id);
+static esp_err_t esp_apptrace_riscv_buffer_swap_end(uint32_t new_block_id, uint32_t prev_block_len);
+static bool esp_apptrace_riscv_host_data_pending(void);
+
+
+const static char *TAG = "esp_apptrace";
+
+static esp_apptrace_riscv_ctrl_block_t s_tracing_ctrl[portNUM_PROCESSORS];
+
+esp_apptrace_hw_t *esp_apptrace_uart_hw_get(int num, void **data)
+{
+    return NULL;
+}
+
+esp_apptrace_hw_t *esp_apptrace_jtag_hw_get(void **data)
+{
+#if CONFIG_APPTRACE_DEST_JTAG
+    static esp_apptrace_membufs_proto_hw_t s_trace_proto_hw = {
+        .swap_start = esp_apptrace_riscv_buffer_swap_start,
+        .swap = esp_apptrace_riscv_buffer_swap,
+        .swap_end = esp_apptrace_riscv_buffer_swap_end,
+        .host_data_pending = esp_apptrace_riscv_host_data_pending,
+    };
+    static esp_apptrace_riscv_data_t s_trace_hw_data = {
+        .membufs = {
+            .hw = &s_trace_proto_hw,
+        },
+    };
+    static esp_apptrace_hw_t s_trace_hw = {
+        .init = (esp_err_t (*)(void *))esp_apptrace_riscv_init,
+        .get_up_buffer = (uint8_t *(*)(void *, uint32_t, esp_apptrace_tmo_t *))esp_apptrace_riscv_up_buffer_get,
+        .put_up_buffer = (esp_err_t (*)(void *, uint8_t *, esp_apptrace_tmo_t *))esp_apptrace_riscv_up_buffer_put,
+        .flush_up_buffer_nolock = (esp_err_t (*)(void *, uint32_t, esp_apptrace_tmo_t *))esp_apptrace_riscv_flush_nolock,
+        .flush_up_buffer = (esp_err_t (*)(void *, esp_apptrace_tmo_t *))esp_apptrace_riscv_flush,
+        .down_buffer_config = (void (*)(void *, uint8_t *, uint32_t ))esp_apptrace_riscv_down_buffer_config,
+        .get_down_buffer = (uint8_t *(*)(void *, uint32_t *, esp_apptrace_tmo_t *))esp_apptrace_riscv_down_buffer_get,
+        .put_down_buffer = (esp_err_t (*)(void *, uint8_t *, esp_apptrace_tmo_t *))esp_apptrace_riscv_down_buffer_put,
+        .host_is_connected = (bool (*)(void *))esp_apptrace_riscv_host_is_connected,
+    };
+    *data = &s_trace_hw_data;
+    return &s_trace_hw;
+#else
+    return NULL;
+#endif
+}
+
+/* Advertises the apptrace control block address to the host.
+   This function can be overridden with a custom implementation,
+   e.g. the OpenOCD flasher stub uses its own implementation of it. */
+__attribute__((weak)) int esp_apptrace_advertise_ctrl_block(void *ctrl_block_addr)
+{
+    register int sys_nr = RISCV_APPTRACE_SYSNR;
+    register int host_ret = 0;
+
+    if (!esp_cpu_in_ocd_debug_mode()) {
+        return 0;
+    }
+    __asm__ volatile ( \
+        ".option push\n" \
+        ".option norvc\n" \
+        "mv a0, %[sys_nr]\n" \
+        "mv a1, %[arg1]\n" \
+        "slli    zero,zero,0x1f\n" \
+        "ebreak\n" \
+        "srai    zero,zero,0x7\n" \
+        "mv %[host_ret], a0\n" \
+    	".option pop\n" \
+        :[host_ret]"=r"(host_ret)
+        :[sys_nr]"r"(sys_nr),[arg1]"r"(ctrl_block_addr):"a0","a1");
+    return host_ret;
+}
+
+/* Returns the up buffers configuration.
+   This function can be overridden with a custom implementation,
+   e.g. the OpenOCD flasher stub uses its own implementation of it. */
+__attribute__((weak)) void esp_apptrace_get_up_buffers(esp_apptrace_mem_block_t mem_blocks_cfg[2])
+{
+    static uint8_t s_mem_blocks[2][CONFIG_APPTRACE_BUF_SIZE];
+
+    mem_blocks_cfg[0].start = s_mem_blocks[0];
+    mem_blocks_cfg[0].sz = CONFIG_APPTRACE_BUF_SIZE;
+    mem_blocks_cfg[1].start = s_mem_blocks[1];
+    mem_blocks_cfg[1].sz = CONFIG_APPTRACE_BUF_SIZE;
+}
+
+static esp_err_t esp_apptrace_riscv_lock(esp_apptrace_riscv_data_t *hw_data, esp_apptrace_tmo_t *tmo)
+{
+#if CONFIG_APPTRACE_LOCK_ENABLE
+    esp_err_t ret = esp_apptrace_lock_take(&hw_data->lock, tmo);
+    if (ret != ESP_OK) {
+        return ESP_FAIL;
+    }
+#endif
+    return ESP_OK;
+}
+
+static esp_err_t esp_apptrace_riscv_unlock(esp_apptrace_riscv_data_t *hw_data)
+{
+    esp_err_t ret = ESP_OK;
+#if CONFIG_APPTRACE_LOCK_ENABLE
+    ret = esp_apptrace_lock_give(&hw_data->lock);
+#endif
+    return ret;
+}
+
+/*****************************************************************************************/
+/***************************** Apptrace HW iface *****************************************/
+/*****************************************************************************************/
+
+static esp_err_t esp_apptrace_riscv_init(esp_apptrace_riscv_data_t *hw_data)
+{
+    int core_id = cpu_hal_get_core_id();
+
+    if (hw_data->inited == 0) {
+        esp_apptrace_mem_block_t mem_blocks_cfg[2];
+        esp_apptrace_get_up_buffers(mem_blocks_cfg);
+        esp_err_t res = esp_apptrace_membufs_init(&hw_data->membufs, mem_blocks_cfg);
+        if (res != ESP_OK) {
+            ESP_APPTRACE_LOGE("Failed to init membufs proto (%d)!", res);
+            return res;
+        }
+#if CONFIG_APPTRACE_LOCK_ENABLE
+        esp_apptrace_lock_init(&hw_data->lock);
+#endif
+    }
+    hw_data->inited |= 1 << core_id;
+    ESP_APPTRACE_LOGI("Apptrace initialized on CPU%d. Tracing control block @ %p.", core_id, &s_tracing_ctrl[core_id]);
+    s_tracing_ctrl[core_id].mem_blocks = hw_data->membufs.blocks;
+    for (int i = 0; i < 2; i++) {
+        ESP_APPTRACE_LOGD("Mem buf[%d] %d bytes @ %p (%p/%p)", i,
+            s_tracing_ctrl[core_id].mem_blocks[i].sz, s_tracing_ctrl[core_id].mem_blocks[i].start,
+            &(s_tracing_ctrl[core_id].mem_blocks[i].start), &(s_tracing_ctrl[core_id].mem_blocks[i].sz));
+    }
+    // notify host about control block address
+    int res = esp_apptrace_advertise_ctrl_block(&s_tracing_ctrl[core_id]);
+    assert(res == 0 && "Falied to send config to host!");
+
+    return ESP_OK;
+}
+
+static uint8_t *esp_apptrace_riscv_up_buffer_get(esp_apptrace_riscv_data_t *hw_data, uint32_t size, esp_apptrace_tmo_t *tmo)
+{
+    uint8_t *ptr;
+
+    if (!ESP_APPTRACE_RISCV_INITED(hw_data)) {
+        return NULL;
+    }
+    esp_err_t res = esp_apptrace_riscv_lock(hw_data, tmo);
+    if (res != ESP_OK) {
+        return NULL;
+    }
+
+    ptr = esp_apptrace_membufs_up_buffer_get(&hw_data->membufs, size, tmo);
+
+    // now we can safely unlock apptrace to allow other tasks/ISRs to get other buffers and write their data
+    if (esp_apptrace_riscv_unlock(hw_data) != ESP_OK) {
+        assert(false && "Failed to unlock apptrace data!");
+    }
+    return ptr;
+}
+
+static esp_err_t esp_apptrace_riscv_up_buffer_put(esp_apptrace_riscv_data_t *hw_data, uint8_t *ptr, esp_apptrace_tmo_t *tmo)
+{
+    if (!ESP_APPTRACE_RISCV_INITED(hw_data)) {
+        return ESP_ERR_INVALID_STATE;
+    }
+    // Can avoid locking because esp_apptrace_membufs_up_buffer_put() just modifies buffer's header
+    esp_err_t res = esp_apptrace_membufs_up_buffer_put(&hw_data->membufs, ptr, tmo);
+    return res;
+}
+
+static void esp_apptrace_riscv_down_buffer_config(esp_apptrace_riscv_data_t *hw_data, uint8_t *buf, uint32_t size)
+{
+    if (!ESP_APPTRACE_RISCV_INITED(hw_data)) {
+        return;
+    }
+    esp_apptrace_membufs_down_buffer_config(&hw_data->membufs, buf, size);
+}
+
+static uint8_t *esp_apptrace_riscv_down_buffer_get(esp_apptrace_riscv_data_t *hw_data, uint32_t *size, esp_apptrace_tmo_t *tmo)
+{
+    uint8_t *ptr;
+
+    if (!ESP_APPTRACE_RISCV_INITED(hw_data)) {
+        return NULL;
+    }
+    esp_err_t res = esp_apptrace_riscv_lock(hw_data, tmo);
+    if (res != ESP_OK) {
+        return NULL;
+    }
+
+    ptr = esp_apptrace_membufs_down_buffer_get(&hw_data->membufs, size, tmo);
+
+    // now we can safely unlock apptrace to allow other tasks/ISRs to get other buffers and write their data
+    if (esp_apptrace_riscv_unlock(hw_data) != ESP_OK) {
+        assert(false && "Failed to unlock apptrace data!");
+    }
+    return ptr;
+}
+
+static esp_err_t esp_apptrace_riscv_down_buffer_put(esp_apptrace_riscv_data_t *hw_data, uint8_t *ptr, esp_apptrace_tmo_t *tmo)
+{
+    if (!ESP_APPTRACE_RISCV_INITED(hw_data)) {
+        return ESP_ERR_INVALID_STATE;
+    }
+    // Can avoid locking because esp_apptrace_membufs_down_buffer_put() does nothing
+    /*esp_err_t res = esp_apptrace_riscv_lock(hw_data, tmo);
+    if (res != ESP_OK) {
+        return res;
+    }*/
+
+    esp_err_t res = esp_apptrace_membufs_down_buffer_put(&hw_data->membufs, ptr, tmo);
+
+    // now we can safely unlock apptrace to allow other tasks/ISRs to get other buffers and write their data
+    /*if (esp_apptrace_riscv_unlock(hw_data) != ESP_OK) {
+        assert(false && "Failed to unlock apptrace data!");
+    }*/
+    return res;
+}
+
+static bool esp_apptrace_riscv_host_is_connected(esp_apptrace_riscv_data_t *hw_data)
+{
+    if (!ESP_APPTRACE_RISCV_INITED(hw_data)) {
+        return false;
+    }
+    return s_tracing_ctrl[cpu_hal_get_core_id()].ctrl & ESP_APPTRACE_RISCV_HOST_CONNECT ? true : false;
+}
+
+static esp_err_t esp_apptrace_riscv_flush_nolock(esp_apptrace_riscv_data_t *hw_data, uint32_t min_sz, esp_apptrace_tmo_t *tmo)
+{
+    if (!ESP_APPTRACE_RISCV_INITED(hw_data)) {
+        return ESP_ERR_INVALID_STATE;
+    }
+    return esp_apptrace_membufs_flush_nolock(&hw_data->membufs, min_sz, tmo);
+}
+
+static esp_err_t esp_apptrace_riscv_flush(esp_apptrace_riscv_data_t *hw_data, esp_apptrace_tmo_t *tmo)
+{
+    if (!ESP_APPTRACE_RISCV_INITED(hw_data)) {
+        return ESP_ERR_INVALID_STATE;
+    }
+    esp_err_t res = esp_apptrace_riscv_lock(hw_data, tmo);
+    if (res != ESP_OK) {
+        return res;
+    }
+
+    res = esp_apptrace_membufs_flush_nolock(&hw_data->membufs, 0, tmo);
+
+    // now we can safely unlock apptrace to allow other tasks/ISRs to get other buffers and write their data
+    if (esp_apptrace_riscv_unlock(hw_data) != ESP_OK) {
+        assert(false && "Failed to unlock apptrace data!");
+    }
+    return res;
+}
+
+/*****************************************************************************************/
+/************************** Membufs proto HW iface ***************************************/
+/*****************************************************************************************/
+
+static inline void esp_apptrace_riscv_buffer_swap_lock(void)
+{
+    extern uint32_t __esp_apptrace_riscv_updated;
+
+    // Indicate to the host that we are about to update the control block.
+    // This is used only to place the CPU into streaming mode at tracing startup:
+    // before streaming starts, the host can halt us after we have read ESP_APPTRACE_RISCV_CTRL_REG but before we have updated it.
+    // HACK: in this case the host will set a breakpoint just after the ESP_APPTRACE_RISCV_CTRL_REG update;
+    // here we set the address to place that breakpoint at.
+    // Enter the ERI update critical section.
+    s_tracing_ctrl[cpu_hal_get_core_id()].stat = (uint32_t)&__esp_apptrace_riscv_updated;
+}
+
+static __attribute__((noinline)) void esp_apptrace_riscv_buffer_swap_unlock(void)
+{
+    // exit ERI update critical section
+    s_tracing_ctrl[cpu_hal_get_core_id()].stat = 0;
+    // TODO: currently host sets breakpoint, use break instruction to stop;
+    // it will allow to use ESP_APPTRACE_RISCV_STAT_REG for other purposes
+    asm volatile (
+        "    .global     __esp_apptrace_riscv_updated\n"
+        "__esp_apptrace_riscv_updated:\n"); // host will set bp here to resolve collision at streaming start
+}
+
+static esp_err_t esp_apptrace_riscv_buffer_swap_start(uint32_t curr_block_id)
+{
+    esp_err_t res = ESP_OK;
+
+    esp_apptrace_riscv_buffer_swap_lock();
+
+    uint32_t ctrl_reg = s_tracing_ctrl[cpu_hal_get_core_id()].ctrl;
+    uint32_t host_connected = ESP_APPTRACE_RISCV_HOST_CONNECT & ctrl_reg;
+    if (host_connected) {
+        uint32_t acked_block = ESP_APPTRACE_RISCV_BLOCK_ID_GET(ctrl_reg);
+        uint32_t host_to_read = ESP_APPTRACE_RISCV_BLOCK_LEN_GET(ctrl_reg);
+        if (host_to_read != 0 || acked_block != (curr_block_id & ESP_APPTRACE_RISCV_BLOCK_ID_MSK)) {
+            ESP_APPTRACE_LOGD("[%d]: Can not switch %x %d %x %x/%lx", cpu_hal_get_core_id(), ctrl_reg, host_to_read, acked_block,
+                curr_block_id & ESP_APPTRACE_RISCV_BLOCK_ID_MSK, curr_block_id);
+            res = ESP_ERR_NO_MEM;
+            goto _on_err;
+        }
+    }
+    return ESP_OK;
+_on_err:
+    esp_apptrace_riscv_buffer_swap_unlock();
+    return res;
+}
+
+static esp_err_t esp_apptrace_riscv_buffer_swap_end(uint32_t new_block_id, uint32_t prev_block_len)
+{
+    uint32_t ctrl_reg = s_tracing_ctrl[cpu_hal_get_core_id()].ctrl;
+    uint32_t host_connected = ESP_APPTRACE_RISCV_HOST_CONNECT & ctrl_reg;
+    s_tracing_ctrl[cpu_hal_get_core_id()].ctrl = ESP_APPTRACE_RISCV_BLOCK_ID(new_block_id) |
+              host_connected | ESP_APPTRACE_RISCV_BLOCK_LEN(prev_block_len);
+    esp_apptrace_riscv_buffer_swap_unlock();
+    return ESP_OK;
+}
+
+static esp_err_t esp_apptrace_riscv_buffer_swap(uint32_t new_block_id)
+{
+    /* do nothing */
+    return ESP_OK;
+}
+
+static bool esp_apptrace_riscv_host_data_pending(void)
+{
+    uint32_t ctrl_reg = s_tracing_ctrl[cpu_hal_get_core_id()].ctrl;
+    // ESP_APPTRACE_LOGV("%s() 0x%x", __func__, ctrl_reg);
+    return (ctrl_reg & ESP_APPTRACE_RISCV_HOST_DATA) ? true : false;
+}

+ 15 - 1
components/app_trace/sys_view/Sample/Config/SEGGER_SYSVIEW_Config_FreeRTOS.c

@@ -73,6 +73,8 @@ Revision: $Rev: 3734 $
 #include "esp32/clk.h"
 #elif CONFIG_IDF_TARGET_ESP32S2
 #include "esp32s2/clk.h"
+#elif CONFIG_IDF_TARGET_ESP32C3
+#include "esp32c3/clk.h"
 #endif
 
 
@@ -89,6 +91,12 @@ extern const SEGGER_SYSVIEW_OS_API SYSVIEW_X_OS_TraceAPI;
 
 // The target device name
 #define SYSVIEW_DEVICE_NAME     CONFIG_IDF_TARGET
+// The target core name
+#if CONFIG_IDF_TARGET_ARCH_XTENSA
+#define SYSVIEW_CORE_NAME       "xtensa"
+#elif CONFIG_IDF_TARGET_ARCH_RISCV
+#define SYSVIEW_CORE_NAME       "riscv"
+#endif
 
 // Determine which timer to use as timestamp source
 #if CONFIG_APPTRACE_SV_TS_SOURCE_CCOUNT
@@ -143,12 +151,16 @@ extern const SEGGER_SYSVIEW_OS_API SYSVIEW_X_OS_TraceAPI;
 // The lowest RAM address used for IDs (pointers)
 #define SYSVIEW_RAM_BASE        (SOC_DROM_LOW)
 
+#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
 #if CONFIG_FREERTOS_CORETIMER_0
     #define SYSTICK_INTR_ID (ETS_INTERNAL_TIMER0_INTR_SOURCE+ETS_INTERNAL_INTR_SOURCE_OFF)
 #endif
 #if CONFIG_FREERTOS_CORETIMER_1
     #define SYSTICK_INTR_ID (ETS_INTERNAL_TIMER1_INTR_SOURCE+ETS_INTERNAL_INTR_SOURCE_OFF)
 #endif
+#elif CONFIG_IDF_TARGET_ESP32C3
+    #define SYSTICK_INTR_ID (ETS_SYSTIMER_TARGET0_EDGE_INTR_SOURCE+ETS_INTERNAL_INTR_SOURCE_OFF)
+#endif
 
 // SystemView is single core specific: it implies that SEGGER_SYSVIEW_LOCK()
 // disables IRQs (disables rescheduling globally). So we can not use finite timeouts for locks and return error
@@ -167,11 +179,13 @@ static esp_apptrace_lock_t s_sys_view_lock = {.mux = portMUX_INITIALIZER_UNLOCKE
 */
 static void _cbSendSystemDesc(void) {
     char irq_str[32];
-    SEGGER_SYSVIEW_SendSysDesc("N="SYSVIEW_APP_NAME",D="SYSVIEW_DEVICE_NAME",C=Xtensa,O=FreeRTOS");
+    SEGGER_SYSVIEW_SendSysDesc("N="SYSVIEW_APP_NAME",D="SYSVIEW_DEVICE_NAME",C="SYSVIEW_CORE_NAME",O=FreeRTOS");
     snprintf(irq_str, sizeof(irq_str), "I#%d=SysTick", SYSTICK_INTR_ID);
     SEGGER_SYSVIEW_SendSysDesc(irq_str);
     size_t isr_count = sizeof(esp_isr_names)/sizeof(esp_isr_names[0]);
     for (size_t i = 0; i < isr_count; ++i) {
+        if (esp_isr_names[i] == NULL || (ETS_INTERNAL_INTR_SOURCE_OFF + i) == SYSTICK_INTR_ID)
+            continue;
         snprintf(irq_str, sizeof(irq_str), "I#%d=%s", ETS_INTERNAL_INTR_SOURCE_OFF + i, esp_isr_names[i]);
         SEGGER_SYSVIEW_SendSysDesc(irq_str);
     }

+ 7 - 0
components/esp_system/port/cpu_start.c

@@ -270,6 +270,13 @@ void IRAM_ATTR call_start_cpu0(void)
 #endif
 
 #ifdef __riscv
+    if (cpu_hal_is_debugger_attached()) {
+        /* Give the debugger some time to detect that the target has started,
+           halt it, enable ebreaks and resume. 500 ms should be enough.
+           NOTE(review): the loop below delays 2 x 100 ms = 200 ms, not 500 ms — confirm intended duration. */
+        for (uint32_t ms_num = 0; ms_num < 2; ms_num++) {
+            esp_rom_delay_us(100000);
+        }
+    }
     // Configure the global pointer register
     // (This should be the first thing IDF app does, as any other piece of code could be
     // relaxed by the linker to access something relative to __global_pointer$)

+ 9 - 0
components/freertos/port/riscv/include/freertos/FreeRTOSConfig.h

@@ -91,4 +91,13 @@
 #define configISR_STACK_SIZE                            (CONFIG_FREERTOS_ISR_STACKSIZE)
 #endif
 
+#ifndef __ASSEMBLER__
+#if CONFIG_APPTRACE_SV_ENABLE
+extern int xPortSwitchFlag;
+#define os_task_switch_is_pended(_cpu_) (xPortSwitchFlag)
+#else
+#define os_task_switch_is_pended(_cpu_) (false)
+#endif
+#endif
+
 #endif // FREERTOS_CONFIG_RISCV_H

+ 1 - 0
components/freertos/port/riscv/port.c

@@ -346,6 +346,7 @@ void vPortYieldOtherCore(BaseType_t coreid)
 
 void vPortYieldFromISR( void )
 {
+    traceISR_EXIT_TO_SCHEDULER();
     uxSchedulerRunning = 1;
     xPortSwitchFlag = 1;
 }

+ 16 - 16
components/soc/esp32c3/interrupts.c

@@ -14,9 +14,9 @@
 
 #include "soc/interrupts.h"
 
-const char *const esp_isr_names[ETS_MAX_INTR_SOURCE] = {
+const char *const esp_isr_names[] = {
     [0] = "WIFI_MAC",
-    [1] = "WIFI_NMI",
+    [1] = "WIFI_MAC_NMI",
     [2] = "WIFI_PWR",
     [3] = "WIFI_BB",
     [4] = "BT_MAC",
@@ -26,7 +26,7 @@ const char *const esp_isr_names[ETS_MAX_INTR_SOURCE] = {
     [8] = "RWBLE",
     [9] = "RWBT_NMI",
     [10] = "RWBLE_NMI",
-    [11] = "I2C",
+    [11] = "I2C_MASTER",
     [12] = "SLC0",
     [13] = "SLC1",
     [14] = "APB_CTRL",
@@ -40,7 +40,7 @@ const char *const esp_isr_names[ETS_MAX_INTR_SOURCE] = {
     [22] = "UART1",
     [23] = "LEDC",
     [24] = "EFUSE",
-    [25] = "CAN",
+    [25] = "TWAI",
     [26] = "USB",
     [27] = "RTC_CORE",
     [28] = "RMT",
@@ -65,16 +65,16 @@ const char *const esp_isr_names[ETS_MAX_INTR_SOURCE] = {
     [47] = "RSA",
     [48] = "AES",
     [49] = "SHA",
-    [50] = "ETS_FROM_CPU_INTR0",
-    [51] = "ETS_FROM_CPU_INTR1",
-    [52] = "ETS_FROM_CPU_INTR2",
-    [53] = "ETS_FROM_CPU_INTR3",
-    [54] = "ETS_ASSIST_DEBUG",
-    [55] = "ETS_DMA_APBPERI_PMS",
-    [56] = "ETS_CORE0_IRAM0_PMS",
-    [57] = "ETS_CORE0_DRAM0_PMS",
-    [58] = "ETS_CORE0_PIF_PMS",
-    [59] = "ETS_CORE0_PIF_PMS_SIZE",
-    [60] = "ETS_BAK_PMS_VIOLATE",
-    [61] = "ETS_CACHE_CORE0_ACS",
+    [50] = "FROM_CPU_INTR0",
+    [51] = "FROM_CPU_INTR1",
+    [52] = "FROM_CPU_INTR2",
+    [53] = "FROM_CPU_INTR3",
+    [54] = "ASSIST_DEBUG",
+    [55] = "DMA_APBPERI_PMS",
+    [56] = "CORE0_IRAM0_PMS",
+    [57] = "CORE0_DRAM0_PMS",
+    [58] = "CORE0_PIF_PMS",
+    [59] = "CORE0_PIF_PMS_SIZE",
+    [60] = "BAK_PMS_VIOLATE",
+    [61] = "CACHE_CORE0_ACS",
 };