
Esp32 C3 UART driver (#6214)

* Use FreeRTOS wrapper

* Add console driver

* Add patch, .config. Update readme.

* Change date
tangzz98 committed 3 years ago
parent commit
3c3bd647f7

+ 27 - 4
bsp/ESP32_C3/.config

@@ -62,7 +62,9 @@ CONFIG_RT_USING_HEAP=y
 CONFIG_RT_USING_DEVICE=y
 # CONFIG_RT_USING_DEVICE_OPS is not set
 # CONFIG_RT_USING_INTERRUPT_INFO is not set
-# CONFIG_RT_USING_CONSOLE is not set
+CONFIG_RT_USING_CONSOLE=y
+CONFIG_RT_CONSOLEBUF_SIZE=256
+CONFIG_RT_CONSOLE_DEVICE_NAME="uart"
 CONFIG_RT_VER_NUM=0x40101
 # CONFIG_RT_USING_CPU_FFS is not set
 # CONFIG_ARCH_CPU_STACK_GROWS_UPWARD is not set
@@ -73,7 +75,21 @@ CONFIG_RT_VER_NUM=0x40101
 CONFIG_RT_USING_COMPONENTS_INIT=y
 # CONFIG_RT_USING_USER_MAIN is not set
 # CONFIG_RT_USING_LEGACY is not set
-# CONFIG_RT_USING_MSH is not set
+CONFIG_RT_USING_MSH=y
+CONFIG_RT_USING_FINSH=y
+CONFIG_FINSH_USING_MSH=y
+CONFIG_FINSH_THREAD_NAME="tshell"
+CONFIG_FINSH_THREAD_PRIORITY=20
+CONFIG_FINSH_THREAD_STACK_SIZE=4096
+CONFIG_FINSH_USING_HISTORY=y
+CONFIG_FINSH_HISTORY_LINES=5
+CONFIG_FINSH_USING_SYMTAB=y
+CONFIG_FINSH_CMD_SIZE=80
+CONFIG_MSH_USING_BUILT_IN_COMMANDS=y
+CONFIG_FINSH_USING_DESCRIPTION=y
+# CONFIG_FINSH_ECHO_DISABLE_DEFAULT is not set
+# CONFIG_FINSH_USING_AUTH is not set
+CONFIG_FINSH_ARG_MAX=10
 # CONFIG_RT_USING_DFS is not set
 # CONFIG_RT_USING_FAL is not set
 
@@ -82,7 +98,11 @@ CONFIG_RT_USING_COMPONENTS_INIT=y
 #
 CONFIG_RT_USING_DEVICE_IPC=y
 # CONFIG_RT_USING_SYSTEM_WORKQUEUE is not set
-# CONFIG_RT_USING_SERIAL is not set
+CONFIG_RT_USING_SERIAL=y
+CONFIG_RT_USING_SERIAL_V1=y
+# CONFIG_RT_USING_SERIAL_V2 is not set
+# CONFIG_RT_SERIAL_USING_DMA is not set
+CONFIG_RT_SERIAL_RB_BUFSZ=64
 # CONFIG_RT_USING_CAN is not set
 # CONFIG_RT_USING_HWTIMER is not set
 # CONFIG_RT_USING_CPUTIME is not set
@@ -620,9 +640,12 @@ CONFIG_BSP_BOARD_LUATOS_ESP32C3=y
 # Onboard Peripheral Drivers
 #
 CONFIG_RT_BSP_LED_PIN=12
+CONFIG_RT_BSP_UART_PORT=0
+CONFIG_RT_BSP_UART_TX_PIN=21
+CONFIG_RT_BSP_UART_RX_PIN=20
 
 #
 # On-chip Peripheral Drivers
 #
 CONFIG_BSP_USING_GPIO=y
-# CONFIG_BSP_USING_UART is not set
+CONFIG_BSP_USING_UART=y
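
With RT_USING_CONSOLE, RT_USING_MSH and the FinSH options enabled above, the serial port now doubles as an interactive shell. As a minimal sketch of what that enables — not part of this commit, with a hypothetical `led_toggle` command name — application code can export its own msh command; RT_BSP_LED_PIN is the LED pin option this BSP already defines, and MSH_CMD_EXPORT is RT-Thread's standard export macro (it relies on FINSH_USING_SYMTAB, enabled above):

```c
#include <rtthread.h>
#include <rtdevice.h>

/* Hypothetical msh command: with FINSH_USING_MSH enabled, typing
 * "led_toggle" in the serial console invokes this function. */
static int led_toggle(int argc, char **argv)
{
    static int state = PIN_LOW;

    (void)argc;
    (void)argv;

    state = (state == PIN_LOW) ? PIN_HIGH : PIN_LOW;
    rt_pin_mode(RT_BSP_LED_PIN, PIN_MODE_OUTPUT);
    rt_pin_write(RT_BSP_LED_PIN, state);
    rt_kprintf("LED %s\n", (state == PIN_HIGH) ? "on" : "off");

    return 0;
}
MSH_CMD_EXPORT(led_toggle, toggle the on-board LED);
```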

+ 0 - 393
bsp/ESP32_C3/0001-add-the-config-of-RTTHREAD.patch

@@ -1,393 +0,0 @@
-From 317ee995e9d530587bfb14439b3b1ee38d1afe77 Mon Sep 17 00:00:00 2001
-From: supperthomas <78900636@qq.com>
-Date: Fri, 6 May 2022 23:06:28 +0800
-Subject: [PATCH] add the config of RTTHREAD
-
-add the init link file
----
- Kconfig                                       |   4 +
- .../esp_system/ld/esp32c3/sections.ld.in      |  26 +++
- components/freertos/port/port_common.c        |   5 +
- components/freertos/port/port_systick.c       |   3 +
- components/riscv/vectors.S                    | 220 ++++++++++++++++--
- 5 files changed, 239 insertions(+), 19 deletions(-)
-
-diff --git a/Kconfig b/Kconfig
-index 928d274106..d368adaa37 100644
---- a/Kconfig
-+++ b/Kconfig
-@@ -61,6 +61,10 @@ mainmenu "Espressif IoT Development Framework Configuration"
-         bool
-         default "y" if IDF_TARGET="linux"
- 
-+    config IDF_RTOS_RTTHREAD
-+        bool "RT-THREAD SELECT"
-+        default "n" 
-+
-     config IDF_FIRMWARE_CHIP_ID
-         hex
-         default 0x0000 if IDF_TARGET_ESP32
-diff --git a/components/esp_system/ld/esp32c3/sections.ld.in b/components/esp_system/ld/esp32c3/sections.ld.in
-index 0ebeda06c1..8215237fff 100644
---- a/components/esp_system/ld/esp32c3/sections.ld.in
-+++ b/components/esp_system/ld/esp32c3/sections.ld.in
-@@ -183,6 +183,32 @@ SECTIONS
-     _noinit_end = ABSOLUTE(.);
-   } > dram0_0_seg
- 
-+  .stack_dummy (COPY):
-+  {
-+      . = ALIGN(8);
-+       __STACKSIZE__ = 40960; 
-+      __stack_start__ = .;
-+      *(.stack*)
-+      . += __STACKSIZE__;
-+      __stack_cpu0 = .;
-+      __stack_end__ = .;
-+  } > dram0_0_seg
-+
-+  .stack_dummy (COPY):
-+  {
-+      . = ALIGN(8);
-+       __HEAPSIZE__ = 40960; 
-+      __heap_start__ = .;
-+      . += __STACKSIZE__;
-+      __heap_end__ = .;
-+        /* section information for initial. */
-+        . = ALIGN(4);
-+        __rt_init_start = .;
-+        KEEP(*(SORT(.rti_fn*)))
-+        __rt_init_end = .;
-+
-+        . = ALIGN(4);
-+  } > dram0_0_seg
-   /* Shared RAM */
-   .dram0.bss (NOLOAD) :
-   {
-diff --git a/components/freertos/port/port_common.c b/components/freertos/port/port_common.c
-index ffca3d5429..9d8159f588 100644
---- a/components/freertos/port/port_common.c
-+++ b/components/freertos/port/port_common.c
-@@ -74,11 +74,16 @@ void esp_startup_start_app_common(void)
-     esp_gdbstub_init();
- #endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
- 
-+#ifdef CONFIG_IDF_RTOS_RTTHREAD
-+    app_main();
-+#else
-     portBASE_TYPE res = xTaskCreatePinnedToCore(&main_task, "main",
-                                                 ESP_TASK_MAIN_STACK, NULL,
-                                                 ESP_TASK_MAIN_PRIO, NULL, ESP_TASK_MAIN_CORE);
-     assert(res == pdTRUE);
-     (void)res;
-+#endif
-+
- }
- 
- static void main_task(void* args)
-diff --git a/components/freertos/port/port_systick.c b/components/freertos/port/port_systick.c
-index 0c14a155a1..0fa203574b 100644
---- a/components/freertos/port/port_systick.c
-+++ b/components/freertos/port/port_systick.c
-@@ -116,6 +116,8 @@ void vPortSetupTimer(void)
-  */
- IRAM_ATTR void SysTickIsrHandler(void *arg)
- {
-+#ifdef CONFIG_IDF_RTOS_RTTHREAD
-+#else
-     uint32_t cpuid = xPortGetCoreID();
-     systimer_hal_context_t *systimer_hal = (systimer_hal_context_t *)arg;
- #ifdef CONFIG_PM_TRACE
-@@ -144,6 +146,7 @@ IRAM_ATTR void SysTickIsrHandler(void *arg)
- #ifdef CONFIG_PM_TRACE
-     ESP_PM_TRACE_EXIT(TICK, cpuid);
- #endif
-+#endif
- }
- 
- #endif // CONFIG_FREERTOS_SYSTICK_USES_CCOUNT
-diff --git a/components/riscv/vectors.S b/components/riscv/vectors.S
-index 1006d5bea5..963494fcb3 100644
---- a/components/riscv/vectors.S
-+++ b/components/riscv/vectors.S
-@@ -17,6 +17,9 @@
- #include "soc/soc_caps.h"
- #include "sdkconfig.h"
- 
-+#define STORE                   sw
-+#define LOAD                    lw
-+#define REGBYTES                4
- 
- 	.equ SAVE_REGS, 32
- 	.equ CONTEXT_SIZE, (SAVE_REGS * 4)
-@@ -218,25 +221,27 @@ _call_panic_handler:
- 	 */
- 	.global _interrupt_handler
- 	.type _interrupt_handler, @function
-+#ifndef CONFIG_IDF_RTOS_RTTHREAD
-+
- _interrupt_handler:
- 	/* entry */
--	save_regs
--	save_mepc
-+	save_regs   /* 保存寄存器 */
-+	save_mepc   /* 保存MEPC */
- 
- 	/* Before doing anythig preserve the stack pointer */
- 	/* It will be saved in current TCB, if needed */
--	mv a0, sp
-+	mv a0, sp            /* 保存SP  a0 = sp */
- 	call rtos_int_enter
- 
- 	/* Before dispatch c handler, restore interrupt to enable nested intr */
--	csrr s1, mcause
--	csrr s2, mstatus
-+	csrr s1, mcause    /* 保存mcause s1 = mcause */
-+	csrr s2, mstatus  /* 保存mstatus  s2 = mstatus */
- 
--	/* Save the interrupt threshold level */
--	la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
--	lw s3, 0(t0)
-+	/* Save the interrupt threshold level 保存中断嵌套层数? */
-+	la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG  /* 保存mstatus  t0 = &INTERRUPT_CORE0_CPU_INT_THRESH_REG */
-+	lw s3, 0(t0)    /*   s3 = mstatus */
- 
--	/* Increase interrupt threshold level */
-+	/* Increase interrupt threshold level 增加中断嵌套层数*/
- 	li t2, 0x7fffffff
- 	and t1, s1, t2		/* t1 = mcause & mask */
- 	slli t1, t1, 2 		/* t1 = mcause * 4 */
-@@ -247,8 +252,8 @@ _interrupt_handler:
- 	sw t2, 0(t0)		/* INTERRUPT_CORE0_CPU_INT_THRESH_REG = t2 */
- 	fence
- 
--	li t0, 0x8
--	csrrs t0, mstatus, t0
-+	li t0, 0x8   /* t0 = 8 */
-+	csrrs t0, mstatus, t0    /*设置状态MIE寄存器,开总中断*/
- 
- 	#ifdef CONFIG_PM_TRACE
- 	li      a0, 0       /* = ESP_PM_TRACE_IDLE */
-@@ -269,34 +274,211 @@ _interrupt_handler:
- 	/* call the C dispatcher */
- 	mv      a0, sp      /* argument 1, stack pointer */
- 	mv      a1, s1      /* argument 2, interrupt number (mcause) */
--	/* mask off the interrupt flag of mcause */
-+	/* mask off the interrupt flag of mcause   屏幕异常中断*/
- 	li	    t0, 0x7fffffff
- 	and     a1, a1, t0
- 	jal     _global_interrupt_handler
- 
--	/* After dispatch c handler, disable interrupt to make freertos make context switch */
-+	/* After dispatch c handler, disable interrupt to make freertos make context switch 
-+	在调用c函数之后,disable 中断让freertos能够做内容切换
-+	*/
- 
- 	li t0, 0x8
--	csrrc t0, mstatus, t0
-+	csrrc t0, mstatus, t0  /*清状态MIE寄存器 关总中断*/
- 
--	/* restore the interrupt threshold level */
-+
-+	/* restore the interrupt threshold level  中断嵌套 */
- 	la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
- 	sw s3, 0(t0)
- 	fence
- 
- 	/* Yield to the next task is needed: */
--	mv a0, sp
-+	mv a0, sp     /* a0 = sp*/
- 	call rtos_int_exit
- 
- 	/* The next (or current) stack pointer is returned in a0 */
--	mv sp, a0
-+	mv sp, a0    /* sp = a0*/
- 
- 	/* restore the rest of the registers */
--	csrw mcause, s1
--	csrw mstatus, s2
-+	csrw mcause, s1  /* mcause = s1 */
-+	csrw mstatus, s2   /* mstatus = s2 */
- 	restore_mepc
- 	restore_regs
- 
- 	/* exit, this will also re-enable the interrupts */
- 	mret
- 	.size  _interrupt_handler, .-_interrupt_handler
-+#else
-+_interrupt_handler:
-+    /* 此时CPU的sp = from_thread->sp */
-+    /* 注意: 在这里,并没有将mepc的值赋值为from_thread栈中的epc,但后面会赋值 */
-+    addi sp, sp, -32 * REGBYTES             /* sp = sp - 32 * 4 栈指针向下偏移32个寄存器长度,用来将CPU的寄存器保存到from_thread的栈中*/
-+    STORE x1,   1 * REGBYTES(sp)            /* 将CPU的x1寄存器,即ra寄存器,保存到from_thread->栈中 */
-+
-+    li    t0,   0x80                        /* t0 = 0x80 */
-+    STORE t0,   2 * REGBYTES(sp)            /* mstatus = t0, 即关闭全局中断 */
-+
-+    /* 将 CPU 的其他寄存器的值,保存到from_thread的任务栈中 */
-+    STORE x4,   4 * REGBYTES(sp)
-+    STORE x5,   5 * REGBYTES(sp)
-+    STORE x6,   6 * REGBYTES(sp)
-+    STORE x7,   7 * REGBYTES(sp)
-+    STORE x8,   8 * REGBYTES(sp)
-+    STORE x9,   9 * REGBYTES(sp)
-+    STORE x10, 10 * REGBYTES(sp)
-+    STORE x11, 11 * REGBYTES(sp)
-+    STORE x12, 12 * REGBYTES(sp)
-+    STORE x13, 13 * REGBYTES(sp)
-+    STORE x14, 14 * REGBYTES(sp)
-+    STORE x15, 15 * REGBYTES(sp)
-+    STORE x16, 16 * REGBYTES(sp)
-+    STORE x17, 17 * REGBYTES(sp)
-+    STORE x18, 18 * REGBYTES(sp)
-+    STORE x19, 19 * REGBYTES(sp)
-+    STORE x20, 20 * REGBYTES(sp)
-+    STORE x21, 21 * REGBYTES(sp)
-+    STORE x22, 22 * REGBYTES(sp)
-+    STORE x23, 23 * REGBYTES(sp)
-+    STORE x24, 24 * REGBYTES(sp)
-+    STORE x25, 25 * REGBYTES(sp)
-+    STORE x26, 26 * REGBYTES(sp)
-+    STORE x27, 27 * REGBYTES(sp)
-+    STORE x28, 28 * REGBYTES(sp)
-+    STORE x29, 29 * REGBYTES(sp)
-+    STORE x30, 30 * REGBYTES(sp)
-+    STORE x31, 31 * REGBYTES(sp)
-+
-+    /* 备份 CPU 的 sp (这时,CPU的sp其实就是from thread的sp指针) 寄存器的值到 s0 寄存器中,下面会使用s0,恢复 CPU 的寄存器 */
-+    move  s0, sp    /* s0 = sp */
-+
-+    /* 在中断函数中,中断函数中调用的C函数,需要使用 sp, 这里,在中断函数中,使用的 sp 为,系统的栈资源 */
-+    /* switch to interrupt stack */
-+    la    sp, __stack_end__   /* sp = _sp */
-+
-+    /* interrupt handle */
-+    /* 注意: 在调用C函数之前,比如sp的值为0x30001000, 在执行完C函数后,sp的值还是会变成 0x30001000 */
-+    call  rt_interrupt_enter    /* 执行所有的中断函数前,调用该函数 */
-+
-+    csrr s1, mcause
-+	csrr s2, mstatus
-+
-+    /* Save the interrupt threshold level */
-+	la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
-+	lw s3, 0(t0)
-+
-+    li t2, 0x7fffffff
-+	and t1, s1, t2		/* t1 = mcause & mask */
-+	slli t1, t1, 2 		/* t1 = mcause * 4 */
-+	la t2, INTC_INT_PRIO_REG(0)
-+	add t1, t2, t1		/* t1 = INTC_INT_PRIO_REG + 4 * mcause */
-+	lw t2, 0(t1)		/* t2 = INTC_INT_PRIO_REG[mcause] */
-+	addi t2, t2, 1		/* t2 = t2 +1 */
-+	sw t2, 0(t0)		/* INTERRUPT_CORE0_CPU_INT_THRESH_REG = t2 */
-+	fence
-+
-+    li t0, 0x8
-+	csrrs t0, mstatus, t0
-+
-+    /* call the C dispatcher */
-+	mv      a0, sp      /* argument 1, stack pointer */
-+	mv      a1, s1      /* argument 2, interrupt number (mcause) */
-+	/* mask off the interrupt flag of mcause */
-+	li	    t0, 0x7fffffff
-+	and     a1, a1, t0
-+	jal     _global_interrupt_handler
-+
-+    li t0, 0x8
-+	csrrc t0, mstatus, t0
-+
-+	/* restore the interrupt threshold level */
-+	la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
-+	sw s3, 0(t0)
-+	fence
-+
-+    call  rt_interrupt_leave    /* 执行所有的中断函数后,调用该函数 */
-+
-+    /* 上面,将保存执行中断服务函数之前的CPU的sp寄存器到了s0所指向的位置处,当执行完中断服务函数,需要将之前的CPU寄存器,恢复一下,此时sp又变成了from thread的sp了 */
-+    move  sp, s0    /* sp = s0 */
-+
-+    /* 下面两句话,相当于将 rt_thread_switch_interrupt_flag 值,赋值给了s2  */
-+    /* 将 rt_thread_switch_interrupt_flag 的地址值,赋值给 s0 寄存器*/
-+    la    s0, rt_thread_switch_interrupt_flag       /* s0 = &rt_thread_switch_interrupt_flag */
-+    /* 将 s0 所指向的地址处的内容,取出来,赋值给 s2 寄存器,其实就是将  rt_thread_switch_interrupt_flag 的值,赋值给了 s2 寄存器*/
-+    lw    s2, 0(s0)                 /* s2 = *s0 = rt_thread_switch_interrupt_flag */
-+
-+    /* 如果 s2的值,即 rt_thread_switch_interrupt_flag 值,如果不为0,则需要继续执行下一条指令,如果为0,则需要跳转到 spurious_interrupt 标号处 执行 */
-+    /* 如果 s2的值等于0,rt_thread_switch_interrupt_flag等于0, 则不需要在中断处理函数中,进行上下文切换,反之则需要 */
-+    /* 如果不需要上下文切换, */
-+
-+    /* 在这里,跳转到 spurious_interrupt的话,是不会进行上下文切换的,因为,此时CPU的sp指针还是from线程的*/
-+    beqz  s2, spurious_interrupt    /* if (s2 == 0) goto spurious_interrupt; else 执行下一条语句*/
-+
-+    /* 需要上下文切换: 主要目的是将CPU的sp指针,赋值为to_thread的sp */
-+    
-+    /* 将 s0 所执向的地址的内容设置为0, 也就是,将变量 rt_thread_switch_interrupt_flag 赋值为了 0 */
-+    /* s0存放的值是 rt_thread_switch_interrupt_flag 变量的地址*/
-+    sw    zero, 0(s0)       /* *s0 = 0; 也就是 rt_thread_switch_interrupt_flag = 0 */
-+    /* 将 mepc 的值,赋值给 a0 寄存器,mepc 的值是,跳转到中断函数执行之前的 PC 指针 */
-+    /* 这时的mpec其实,还是from线程,在跳转到中断执行前的一个PC地址 */
-+    csrr  a0, mepc  /* a0 = mepc */
-+
-+    /* 将 mpec 的值写回到freom thread任务栈中的 epc 中,待后续,恢复from线程时,使用 */
-+    STORE a0, 0 * REGBYTES(sp)  /* from_thread->sp->epc = a0 ,中断入口处*/
-+
-+    /* 将from_thread的sp指针,赋值为CPU的sp指针 */
-+    la    s0, rt_interrupt_from_thread  /* s0 = &rt_interrupt_from_thread 注意: rt_interrupt_from_thread = &(from_thread->sp) */
-+    LOAD  s1, 0(s0)                     /* s1 = rt_interrupt_from_thread,也就是s1 = &(from_thread->sp) */
-+    STORE sp, 0(s1)                     /* from_thread->sp = sp*/
-+   
-+    /* 接下来,需要开始恢复CPU的sp为to_thread的sp了 */
-+    la    s0, rt_interrupt_to_thread    /* s0 = &rt_interrupt_to_thread 注意: rt_interrupt_to_thread = &(to_thred->sp)*/
-+    LOAD  s1, 0(s0)                     /* s1 = rt_interrupt_to_thread, 也就是s1 = &(to_thred->sp) */
-+    LOAD  sp, 0(s1)                     /* sp = (to_thred->sp)*/
-+
-+    /* 将CPU的 mepc设置为to_thred的mepc,待中断退出,执行mret指令后,将从该地址开始执行 */
-+    LOAD  a0,  0 * REGBYTES(sp)         /* a0 = to_thread的mepc的值*/
-+    csrw  mepc, a0                      /* mepc = a0 */
-+
-+
-+spurious_interrupt:
-+    LOAD  x1,   1 * REGBYTES(sp)
-+
-+    /* Remain in M-mode after mret */
-+    li    t0, 0x00001800
-+    csrs  mstatus, t0
-+    LOAD  t0,   2 * REGBYTES(sp)
-+    csrs  mstatus, t0
-+
-+    LOAD  x4,   4 * REGBYTES(sp)
-+    LOAD  x5,   5 * REGBYTES(sp)
-+    LOAD  x6,   6 * REGBYTES(sp)
-+    LOAD  x7,   7 * REGBYTES(sp)
-+    LOAD  x8,   8 * REGBYTES(sp)
-+    LOAD  x9,   9 * REGBYTES(sp)
-+    LOAD  x10, 10 * REGBYTES(sp)
-+    LOAD  x11, 11 * REGBYTES(sp)
-+    LOAD  x12, 12 * REGBYTES(sp)
-+    LOAD  x13, 13 * REGBYTES(sp)
-+    LOAD  x14, 14 * REGBYTES(sp)
-+    LOAD  x15, 15 * REGBYTES(sp)
-+    LOAD  x16, 16 * REGBYTES(sp)
-+    LOAD  x17, 17 * REGBYTES(sp)
-+    LOAD  x18, 18 * REGBYTES(sp)
-+    LOAD  x19, 19 * REGBYTES(sp)
-+    LOAD  x20, 20 * REGBYTES(sp)
-+    LOAD  x21, 21 * REGBYTES(sp)
-+    LOAD  x22, 22 * REGBYTES(sp)
-+    LOAD  x23, 23 * REGBYTES(sp)
-+    LOAD  x24, 24 * REGBYTES(sp)
-+    LOAD  x25, 25 * REGBYTES(sp)
-+    LOAD  x26, 26 * REGBYTES(sp)
-+    LOAD  x27, 27 * REGBYTES(sp)
-+    LOAD  x28, 28 * REGBYTES(sp)
-+    LOAD  x29, 29 * REGBYTES(sp)
-+    LOAD  x30, 30 * REGBYTES(sp)
-+    LOAD  x31, 31 * REGBYTES(sp)
-+
-+    addi  sp, sp, 32 * REGBYTES
-+    mret
-+	.size  _interrupt_handler, .-_interrupt_handler
-+#endif
--- 
-2.35.1.windows.2
-

+ 4 - 4
bsp/ESP32_C3/README.md

@@ -45,7 +45,7 @@
 | **On-chip peripherals** | **Support status** | **Notes**                              |
 | :----------------- | :----------: | :------------------------------------- |
 | GPIO              |     Supported     |  |
-| UART              |     In progress     |                                 |
+| UART              |     Supported     | On the LUATOS_ESP32C3 board, connect UART0_TX and UART0_RX to a USB-to-serial converter (e.g. CP2102) |
 | JTAG debugging          |     Supported     | Boards where the ESP32-C3 connects to the PC over USB can be debugged                                |
 
 ## Usage
@@ -58,15 +58,15 @@ IDF的搭建方法有很多种,尝试了很多种方法之后,总结了一
 
 ### Adding the RT-Thread patch to ESP-IDF
 
-Because the IDF is built on FreeRTOS, a few files have to be modified before RT-Thread can be used. Copy `0001-add-the-config-of-RTTHREAD.patch` into the IDF source directory, then run the following commands in `git bash` to apply the patch:
+Because the IDF is built on FreeRTOS, a few files have to be modified before RT-Thread can be used. Copy `rtt.patch` into the IDF source directory, then run the following commands in `git bash` to apply the patch:
 
 ```
 cd esp/esp-idf
 git checkout v4.4
-git am 0001-add-the-config-of-RTTHREAD.patch
+git am rtt.patch
 ```
 
-If you prefer not to use the patch file, the modified code has already been uploaded to GitHub: just download the latest master branch from [supperthomas/esp-idf](https://github.com/supperthomas/esp-idf). With the modified IDF, the original IDF examples still build and run as before, so the two do not interfere with each other.
+If you prefer not to use the patch file, the modified code has already been uploaded to GitHub: just download the latest freertos_wrapper branch from [tangzz98/esp-idf](https://github.com/tangzz98/esp-idf/tree/freertos_wrapper). With the modified IDF, the original IDF examples still build and run as before, so the two do not interfere with each other.
 
 #### Build and flash
 

+ 18 - 1
bsp/ESP32_C3/board/Kconfig

@@ -27,6 +27,21 @@ menu "Onboard Peripheral Drivers"
         default 8 if BSP_BOARD_HX_EXP32C3
         depends on BSP_USING_GPIO
 
+    config RT_BSP_UART_PORT
+        int "UART PORT SET"
+        default 0
+        depends on BSP_USING_UART
+
+    config RT_BSP_UART_TX_PIN
+        int "UART TX PIN SET"
+        default 21 if BSP_BOARD_LUATOS_ESP32C3
+        depends on BSP_USING_UART
+
+    config RT_BSP_UART_RX_PIN
+        int "UART RX PIN SET"
+        default 20 if BSP_BOARD_LUATOS_ESP32C3
+        depends on BSP_USING_UART
+
 endmenu
 
 
@@ -38,7 +53,9 @@ menu "On-chip Peripheral Drivers"
     
     config BSP_USING_UART
 		bool "Enable UART"
-        default n
+        select RT_USING_SERIAL
+        select RT_USING_SERIAL_V1
+        default y
 endmenu
 
 endmenu

+ 8 - 0
bsp/ESP32_C3/main/CMakeLists.txt

@@ -1,5 +1,6 @@
 idf_component_register(SRCS "board.c" "main.c"
                     "drv_gpio.c"
+                    "drv_uart.c"
                     "../../../libcpu/risc-v/common/cpuport.c"
                     "../../../libcpu/risc-v/common/context_gcc.S"
                     "../../../src/components.c"
@@ -24,11 +25,18 @@ idf_component_register(SRCS "board.c" "main.c"
                     "../../../components/drivers/ipc/dataqueue.c"
                     "../../../components/drivers/ipc/ringbuffer.c"
                     "../../../components/drivers/ipc/workqueue.c"
+                    "../../../components/drivers/serial/serial.c"
+                    "../../../components/finsh/cmd.c"
+                    "../../../components/finsh/msh_file.c"
+                    "../../../components/finsh/msh_parse.c"
+                    "../../../components/finsh/msh.c"
+                    "../../../components/finsh/shell.c"
 
                     INCLUDE_DIRS 
                     "../../../components/drivers/include/drivers"
 
                     "../../../components/drivers/include"
+                    "../../../components/finsh"
                     "."
                     "../../../include" 
                     "../../../libcpu/risc-v/common" 

+ 7 - 13
bsp/ESP32_C3/main/board.c

@@ -21,8 +21,8 @@
 #include "rtthread.h"
 #include "rthw.h"
 #include "drv_gpio.h"
-
-#define rt_kprintf printf
+#include "drv_uart.h"
+#include "shell.h"
 
 #ifdef RT_USING_COMPONENTS_INIT
 /*
@@ -190,16 +190,13 @@ void rt_hw_systick_init(void)
 void rt_hw_board_init(void)
 {
     rt_hw_systick_init();
-#if defined(RT_USING_HEAP)
-    extern int __heap_start__;
-    extern int __heap_end__;
-    printf("%s:%d__heap_start__:%p,__heap_end__:%p\n",__func__,__LINE__,&__heap_start__,&__heap_end__);
-    rt_system_heap_init((void *)&__heap_start__, (void *)&__heap_end__);
-#endif
     /* Board underlying hardware initialization */
 #ifdef RT_USING_COMPONENTS_INIT
     rt_components_board_init();
 #endif
+#if defined(RT_USING_CONSOLE) && defined(RT_USING_DEVICE)
+    rt_console_set_device(RT_CONSOLE_DEVICE_NAME);
+#endif
 }
 
 static void rtthread_startup(void)
@@ -207,15 +204,10 @@ static void rtthread_startup(void)
     rt_hw_interrupt_disable();
     /* init board */
     rt_hw_board_init();
-    /* show RT-Thread version */
-    rt_show_version();
 
     /* timer system initialization */
     rt_system_timer_init();
 
-    /* scheduler system initialization */
-    rt_system_scheduler_init();
-
     /* create init_thread */
     rt_application_init();
 
@@ -229,6 +221,8 @@ static void rtthread_startup(void)
     rt_system_scheduler_start();
     /* init scheduler system */
     rt_hw_pin_init();
+    rt_hw_uart_init();
+    finsh_system_init();
     /* never reach here */
     return ;
 }

+ 133 - 0
bsp/ESP32_C3/main/drv_uart.c

@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-08-04     tangzz98     first version
+ *
+ */
+
+#include "drv_uart.h"
+
+#ifdef RT_USING_SERIAL_V1
+
+#ifdef CONFIG_UART_ISR_IN_IRAM
+#define UART_ISR_ATTR     IRAM_ATTR
+#else
+#define UART_ISR_ATTR
+#endif
+
+uart_hal_context_t hal[] = {
+    {
+        .dev = &UART0,
+    },
+    {
+        .dev = &UART1,
+    },
+};
+
+static struct rt_serial_device _serial;
+
+static void mcu_uart_rx_intr_handler(void *param)
+{
+    uint32_t uart_intr_status;
+    struct rt_serial_device *serial;
+    uart_port_t port;
+    rt_interrupt_enter();
+    serial = (struct rt_serial_device *)param;
+    port = (uart_port_t)serial->parent.user_data;
+    uart_intr_status = uart_hal_get_intsts_mask(&hal[port]);
+    if (uart_intr_status != 0)
+    {
+        if (uart_intr_status & UART_INTR_RXFIFO_FULL)
+        {
+            rt_hw_serial_isr(serial, RT_SERIAL_EVENT_RX_IND);
+        }
+        uart_hal_clr_intsts_mask(&hal[port], uart_intr_status);
+    }
+    rt_interrupt_leave();
+}
+
+static rt_err_t mcu_uart_configure(struct rt_serial_device *serial, struct serial_configure *cfg)
+{
+    return RT_EOK;
+}
+
+static rt_err_t mcu_uart_control(struct rt_serial_device *serial, int cmd, void *arg)
+{
+    return RT_EOK;
+}
+
+static int mcu_uart_putc(struct rt_serial_device *serial, char c)
+{
+    uart_port_t port = (uart_port_t)serial->parent.user_data;
+    uint32_t write_size = 0;
+    do
+    {
+        uart_hal_write_txfifo(&hal[port], (const uint8_t *)&c, 1, &write_size);
+    } while (write_size == 0);
+
+    return 1;
+}
+
+static int mcu_uart_getc(struct rt_serial_device *serial)
+{
+    uart_port_t port = (uart_port_t)serial->parent.user_data;
+    uint8_t c;
+    int len = uart_hal_get_rxfifo_len(&hal[port]);
+    if (len == 0)
+    {
+        return -1;
+    }
+    else
+    {
+        len = 1;
+        uart_hal_read_rxfifo(&hal[port], &c, &len);
+        return (int)c;
+    }
+}
+
+static const struct rt_uart_ops _uart_ops =
+{
+    mcu_uart_configure,
+    mcu_uart_control,
+    mcu_uart_putc,
+    mcu_uart_getc,
+    RT_NULL,
+};
+
+int rt_hw_uart_init(void)
+{
+    uart_intr_config_t uart_intr = {
+        .intr_enable_mask = UART_INTR_RXFIFO_FULL,
+        .rxfifo_full_thresh = 1,
+    };
+    struct serial_configure config = RT_SERIAL_CONFIG_DEFAULT;
+    uart_config_t uart_config = {
+        .baud_rate = BAUD_RATE_115200,
+        .data_bits = UART_DATA_8_BITS,
+        .parity    = UART_PARITY_DISABLE,
+        .stop_bits = UART_STOP_BITS_1,
+        .flow_ctrl = UART_HW_FLOWCTRL_DISABLE,
+        .source_clk = UART_SCLK_APB,
+    };
+    int intr_alloc_flags = 0;
+
+#if CONFIG_UART_ISR_IN_IRAM
+    intr_alloc_flags = ESP_INTR_FLAG_IRAM;
+#endif
+
+    ESP_ERROR_CHECK(uart_param_config(RT_BSP_UART_PORT, &uart_config));
+    ESP_ERROR_CHECK(uart_set_pin(RT_BSP_UART_PORT, RT_BSP_UART_TX_PIN, RT_BSP_UART_RX_PIN, UART_PIN_NO_CHANGE, UART_PIN_NO_CHANGE));
+    ESP_ERROR_CHECK(esp_intr_alloc(uart_periph_signal[RT_BSP_UART_PORT].irq, intr_alloc_flags, mcu_uart_rx_intr_handler, (void *)&_serial, NULL));
+    ESP_ERROR_CHECK(uart_intr_config(RT_BSP_UART_PORT, &uart_intr));
+    _serial.ops = &_uart_ops;
+    _serial.config = config;
+
+    return rt_hw_serial_register(&_serial, "uart", RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_INT_RX, (void *)RT_BSP_UART_PORT);
+}
+INIT_BOARD_EXPORT(rt_hw_uart_init);
+
+#endif /* RT_USING_SERIAL_V1 */
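
rt_hw_uart_init() above registers the port with RT-Thread's device framework under the name "uart", and board.c then attaches the console to that name. For illustration only — the `uart_echo()` helper below is hypothetical and not part of this commit — the same device can also be reached through the generic rt_device_* API:

```c
#include <rtthread.h>
#include <rtdevice.h>

/* Hypothetical sketch: echo bytes on the "uart" device registered by rt_hw_uart_init(). */
static void uart_echo(void)
{
    char ch;
    rt_device_t dev = rt_device_find("uart");

    if (dev == RT_NULL)
        return;

    /* Match the RT_DEVICE_FLAG_INT_RX flag used when the driver was registered. */
    rt_device_open(dev, RT_DEVICE_OFLAG_RDWR | RT_DEVICE_FLAG_INT_RX);

    while (1)
    {
        /* rt_device_read() returns 0 when the serial ring buffer is empty. */
        if (rt_device_read(dev, 0, &ch, 1) == 1)
        {
            rt_device_write(dev, 0, &ch, 1);
        }
        else
        {
            rt_thread_mdelay(10);
        }
    }
}
```

In practice the console and the FinSH shell already own this device, so application code will usually just call rt_kprintf(); the sketch only shows how the registered driver plugs into the device model.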

+ 28 - 0
bsp/ESP32_C3/main/drv_uart.h

@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-08-04     tangzz98     first version
+ *
+ */
+
+#ifndef __DRV_UART_H__
+#define __DRV_UART_H__
+
+#include <rtthread.h>
+#include <rthw.h>
+
+#ifdef RT_USING_DEVICE
+#include <rtdevice.h>
+#endif
+
+#include "driver/uart.h"
+#include "hal/uart_hal.h"
+#include "sdkconfig.h"
+
+int rt_hw_uart_init(void);
+
+#endif /* __DRV_UART_H__ */

+ 3 - 1
bsp/ESP32_C3/main/main.c

@@ -15,10 +15,12 @@
 
 int rtt_main(void)
 {
+    /* show RT-Thread version */
+    rt_show_version();
+    rt_kprintf("Hello!RT-THREAD!\r\n");
     rt_pin_mode(RT_BSP_LED_PIN, PIN_MODE_OUTPUT);
     while (1)
     {
-        printf("Hello!RT-THREAD!\r\n");
         rt_pin_write(RT_BSP_LED_PIN, PIN_HIGH);
         rt_thread_mdelay(1000);
         rt_pin_write(RT_BSP_LED_PIN, PIN_LOW);

+ 22 - 0
bsp/ESP32_C3/rtconfig.h

@@ -34,16 +34,34 @@
 /* Kernel Device Object */
 
 #define RT_USING_DEVICE
+#define RT_USING_CONSOLE
+#define RT_CONSOLEBUF_SIZE 256
+#define RT_CONSOLE_DEVICE_NAME "uart"
 #define RT_VER_NUM 0x40101
 
 /* RT-Thread Components */
 
 #define RT_USING_COMPONENTS_INIT
+#define RT_USING_MSH
+#define RT_USING_FINSH
+#define FINSH_USING_MSH
+#define FINSH_THREAD_NAME "tshell"
+#define FINSH_THREAD_PRIORITY 20
+#define FINSH_THREAD_STACK_SIZE 4096
+#define FINSH_USING_HISTORY
+#define FINSH_HISTORY_LINES 5
+#define FINSH_USING_SYMTAB
+#define FINSH_CMD_SIZE 80
+#define MSH_USING_BUILT_IN_COMMANDS
+#define FINSH_USING_DESCRIPTION
+#define FINSH_ARG_MAX 10
 
 /* Device Drivers */
 
 #define RT_USING_DEVICE_IPC
 #define RT_USING_PIN
+#define RT_USING_SERIAL
+#define RT_USING_SERIAL_V1
 
 /* Using USB */
 
@@ -150,9 +168,13 @@
 /* Onboard Peripheral Drivers */
 
 #define RT_BSP_LED_PIN 12
+#define RT_BSP_UART_PORT 0
+#define RT_BSP_UART_TX_PIN 21
+#define RT_BSP_UART_RX_PIN 20
 
 /* On-chip Peripheral Drivers */
 
 #define BSP_USING_GPIO
+#define BSP_USING_UART
 
 #endif

+ 15373 - 0
bsp/ESP32_C3/rtt.patch

@@ -0,0 +1,15373 @@
+From 3d7fabf23eeae26b7d739fbb649090aa590dcf3b Mon Sep 17 00:00:00 2001
+From: supperthomas <78900636@qq.com>
+Date: Fri, 6 May 2022 23:06:28 +0800
+Subject: [PATCH 1/4] add the config of RTTHREAD
+
+add the init link file
+---
+ Kconfig                                       |   4 +
+ .../esp_system/ld/esp32c3/sections.ld.in      |  26 +++
+ components/freertos/port/port_common.c        |   5 +
+ components/freertos/port/port_systick.c       |   3 +
+ components/riscv/vectors.S                    | 220 ++++++++++++++++--
+ 5 files changed, 239 insertions(+), 19 deletions(-)
+
+diff --git a/Kconfig b/Kconfig
+index 928d274106..d368adaa37 100644
+--- a/Kconfig
++++ b/Kconfig
+@@ -61,6 +61,10 @@ mainmenu "Espressif IoT Development Framework Configuration"
+         bool
+         default "y" if IDF_TARGET="linux"
+ 
++    config IDF_RTOS_RTTHREAD
++        bool "RT-THREAD SELECT"
++        default "n" 
++
+     config IDF_FIRMWARE_CHIP_ID
+         hex
+         default 0x0000 if IDF_TARGET_ESP32
+diff --git a/components/esp_system/ld/esp32c3/sections.ld.in b/components/esp_system/ld/esp32c3/sections.ld.in
+index 0ebeda06c1..8215237fff 100644
+--- a/components/esp_system/ld/esp32c3/sections.ld.in
++++ b/components/esp_system/ld/esp32c3/sections.ld.in
+@@ -183,6 +183,32 @@ SECTIONS
+     _noinit_end = ABSOLUTE(.);
+   } > dram0_0_seg
+ 
++  .stack_dummy (COPY):
++  {
++      . = ALIGN(8);
++       __STACKSIZE__ = 40960; 
++      __stack_start__ = .;
++      *(.stack*)
++      . += __STACKSIZE__;
++      __stack_cpu0 = .;
++      __stack_end__ = .;
++  } > dram0_0_seg
++
++  .stack_dummy (COPY):
++  {
++      . = ALIGN(8);
++       __HEAPSIZE__ = 40960; 
++      __heap_start__ = .;
++      . += __STACKSIZE__;
++      __heap_end__ = .;
++        /* section information for initial. */
++        . = ALIGN(4);
++        __rt_init_start = .;
++        KEEP(*(SORT(.rti_fn*)))
++        __rt_init_end = .;
++
++        . = ALIGN(4);
++  } > dram0_0_seg
+   /* Shared RAM */
+   .dram0.bss (NOLOAD) :
+   {
+diff --git a/components/freertos/port/port_common.c b/components/freertos/port/port_common.c
+index ffca3d5429..9d8159f588 100644
+--- a/components/freertos/port/port_common.c
++++ b/components/freertos/port/port_common.c
+@@ -74,11 +74,16 @@ void esp_startup_start_app_common(void)
+     esp_gdbstub_init();
+ #endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
+ 
++#ifdef CONFIG_IDF_RTOS_RTTHREAD
++    app_main();
++#else
+     portBASE_TYPE res = xTaskCreatePinnedToCore(&main_task, "main",
+                                                 ESP_TASK_MAIN_STACK, NULL,
+                                                 ESP_TASK_MAIN_PRIO, NULL, ESP_TASK_MAIN_CORE);
+     assert(res == pdTRUE);
+     (void)res;
++#endif
++
+ }
+ 
+ static void main_task(void* args)
+diff --git a/components/freertos/port/port_systick.c b/components/freertos/port/port_systick.c
+index 0c14a155a1..0fa203574b 100644
+--- a/components/freertos/port/port_systick.c
++++ b/components/freertos/port/port_systick.c
+@@ -116,6 +116,8 @@ void vPortSetupTimer(void)
+  */
+ IRAM_ATTR void SysTickIsrHandler(void *arg)
+ {
++#ifdef CONFIG_IDF_RTOS_RTTHREAD
++#else
+     uint32_t cpuid = xPortGetCoreID();
+     systimer_hal_context_t *systimer_hal = (systimer_hal_context_t *)arg;
+ #ifdef CONFIG_PM_TRACE
+@@ -144,6 +146,7 @@ IRAM_ATTR void SysTickIsrHandler(void *arg)
+ #ifdef CONFIG_PM_TRACE
+     ESP_PM_TRACE_EXIT(TICK, cpuid);
+ #endif
++#endif
+ }
+ 
+ #endif // CONFIG_FREERTOS_SYSTICK_USES_CCOUNT
+diff --git a/components/riscv/vectors.S b/components/riscv/vectors.S
+index 1006d5bea5..963494fcb3 100644
+--- a/components/riscv/vectors.S
++++ b/components/riscv/vectors.S
+@@ -17,6 +17,9 @@
+ #include "soc/soc_caps.h"
+ #include "sdkconfig.h"
+ 
++#define STORE                   sw
++#define LOAD                    lw
++#define REGBYTES                4
+ 
+ 	.equ SAVE_REGS, 32
+ 	.equ CONTEXT_SIZE, (SAVE_REGS * 4)
+@@ -218,25 +221,27 @@ _call_panic_handler:
+ 	 */
+ 	.global _interrupt_handler
+ 	.type _interrupt_handler, @function
++#ifndef CONFIG_IDF_RTOS_RTTHREAD
++
+ _interrupt_handler:
+ 	/* entry */
+-	save_regs
+-	save_mepc
++	save_regs   /* 保存寄存器 */
++	save_mepc   /* 保存MEPC */
+ 
+ 	/* Before doing anythig preserve the stack pointer */
+ 	/* It will be saved in current TCB, if needed */
+-	mv a0, sp
++	mv a0, sp            /* 保存SP  a0 = sp */
+ 	call rtos_int_enter
+ 
+ 	/* Before dispatch c handler, restore interrupt to enable nested intr */
+-	csrr s1, mcause
+-	csrr s2, mstatus
++	csrr s1, mcause    /* 保存mcause s1 = mcause */
++	csrr s2, mstatus  /* 保存mstatus  s2 = mstatus */
+ 
+-	/* Save the interrupt threshold level */
+-	la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
+-	lw s3, 0(t0)
++	/* Save the interrupt threshold level 保存中断嵌套层数? */
++	la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG  /* 保存mstatus  t0 = &INTERRUPT_CORE0_CPU_INT_THRESH_REG */
++	lw s3, 0(t0)    /*   s3 = mstatus */
+ 
+-	/* Increase interrupt threshold level */
++	/* Increase interrupt threshold level 增加中断嵌套层数*/
+ 	li t2, 0x7fffffff
+ 	and t1, s1, t2		/* t1 = mcause & mask */
+ 	slli t1, t1, 2 		/* t1 = mcause * 4 */
+@@ -247,8 +252,8 @@ _interrupt_handler:
+ 	sw t2, 0(t0)		/* INTERRUPT_CORE0_CPU_INT_THRESH_REG = t2 */
+ 	fence
+ 
+-	li t0, 0x8
+-	csrrs t0, mstatus, t0
++	li t0, 0x8   /* t0 = 8 */
++	csrrs t0, mstatus, t0    /*设置状态MIE寄存器,开总中断*/
+ 
+ 	#ifdef CONFIG_PM_TRACE
+ 	li      a0, 0       /* = ESP_PM_TRACE_IDLE */
+@@ -269,34 +274,211 @@ _interrupt_handler:
+ 	/* call the C dispatcher */
+ 	mv      a0, sp      /* argument 1, stack pointer */
+ 	mv      a1, s1      /* argument 2, interrupt number (mcause) */
+-	/* mask off the interrupt flag of mcause */
++	/* mask off the interrupt flag of mcause   屏幕异常中断*/
+ 	li	    t0, 0x7fffffff
+ 	and     a1, a1, t0
+ 	jal     _global_interrupt_handler
+ 
+-	/* After dispatch c handler, disable interrupt to make freertos make context switch */
++	/* After dispatch c handler, disable interrupt to make freertos make context switch 
++	在调用c函数之后,disable 中断让freertos能够做内容切换
++	*/
+ 
+ 	li t0, 0x8
+-	csrrc t0, mstatus, t0
++	csrrc t0, mstatus, t0  /*清状态MIE寄存器 关总中断*/
+ 
+-	/* restore the interrupt threshold level */
++
++	/* restore the interrupt threshold level  中断嵌套 */
+ 	la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
+ 	sw s3, 0(t0)
+ 	fence
+ 
+ 	/* Yield to the next task is needed: */
+-	mv a0, sp
++	mv a0, sp     /* a0 = sp*/
+ 	call rtos_int_exit
+ 
+ 	/* The next (or current) stack pointer is returned in a0 */
+-	mv sp, a0
++	mv sp, a0    /* sp = a0*/
+ 
+ 	/* restore the rest of the registers */
+-	csrw mcause, s1
+-	csrw mstatus, s2
++	csrw mcause, s1  /* mcause = s1 */
++	csrw mstatus, s2   /* mstatus = s2 */
+ 	restore_mepc
+ 	restore_regs
+ 
+ 	/* exit, this will also re-enable the interrupts */
+ 	mret
+ 	.size  _interrupt_handler, .-_interrupt_handler
++#else
++_interrupt_handler:
++    /* 此时CPU的sp = from_thread->sp */
++    /* 注意: 在这里,并没有将mepc的值赋值为from_thread栈中的epc,但后面会赋值 */
++    addi sp, sp, -32 * REGBYTES             /* sp = sp - 32 * 4 栈指针向下偏移32个寄存器长度,用来将CPU的寄存器保存到from_thread的栈中*/
++    STORE x1,   1 * REGBYTES(sp)            /* 将CPU的x1寄存器,即ra寄存器,保存到from_thread->栈中 */
++
++    li    t0,   0x80                        /* t0 = 0x80 */
++    STORE t0,   2 * REGBYTES(sp)            /* mstatus = t0, 即关闭全局中断 */
++
++    /* 将 CPU 的其他寄存器的值,保存到from_thread的任务栈中 */
++    STORE x4,   4 * REGBYTES(sp)
++    STORE x5,   5 * REGBYTES(sp)
++    STORE x6,   6 * REGBYTES(sp)
++    STORE x7,   7 * REGBYTES(sp)
++    STORE x8,   8 * REGBYTES(sp)
++    STORE x9,   9 * REGBYTES(sp)
++    STORE x10, 10 * REGBYTES(sp)
++    STORE x11, 11 * REGBYTES(sp)
++    STORE x12, 12 * REGBYTES(sp)
++    STORE x13, 13 * REGBYTES(sp)
++    STORE x14, 14 * REGBYTES(sp)
++    STORE x15, 15 * REGBYTES(sp)
++    STORE x16, 16 * REGBYTES(sp)
++    STORE x17, 17 * REGBYTES(sp)
++    STORE x18, 18 * REGBYTES(sp)
++    STORE x19, 19 * REGBYTES(sp)
++    STORE x20, 20 * REGBYTES(sp)
++    STORE x21, 21 * REGBYTES(sp)
++    STORE x22, 22 * REGBYTES(sp)
++    STORE x23, 23 * REGBYTES(sp)
++    STORE x24, 24 * REGBYTES(sp)
++    STORE x25, 25 * REGBYTES(sp)
++    STORE x26, 26 * REGBYTES(sp)
++    STORE x27, 27 * REGBYTES(sp)
++    STORE x28, 28 * REGBYTES(sp)
++    STORE x29, 29 * REGBYTES(sp)
++    STORE x30, 30 * REGBYTES(sp)
++    STORE x31, 31 * REGBYTES(sp)
++
++    /* 备份 CPU 的 sp (这时,CPU的sp其实就是from thread的sp指针) 寄存器的值到 s0 寄存器中,下面会使用s0,恢复 CPU 的寄存器 */
++    move  s0, sp    /* s0 = sp */
++
++    /* 在中断函数中,中断函数中调用的C函数,需要使用 sp, 这里,在中断函数中,使用的 sp 为,系统的栈资源 */
++    /* switch to interrupt stack */
++    la    sp, __stack_end__   /* sp = _sp */
++
++    /* interrupt handle */
++    /* 注意: 在调用C函数之前,比如sp的值为0x30001000, 在执行完C函数后,sp的值还是会变成 0x30001000 */
++    call  rt_interrupt_enter    /* 执行所有的中断函数前,调用该函数 */
++
++    csrr s1, mcause
++	csrr s2, mstatus
++
++    /* Save the interrupt threshold level */
++	la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
++	lw s3, 0(t0)
++
++    li t2, 0x7fffffff
++	and t1, s1, t2		/* t1 = mcause & mask */
++	slli t1, t1, 2 		/* t1 = mcause * 4 */
++	la t2, INTC_INT_PRIO_REG(0)
++	add t1, t2, t1		/* t1 = INTC_INT_PRIO_REG + 4 * mcause */
++	lw t2, 0(t1)		/* t2 = INTC_INT_PRIO_REG[mcause] */
++	addi t2, t2, 1		/* t2 = t2 +1 */
++	sw t2, 0(t0)		/* INTERRUPT_CORE0_CPU_INT_THRESH_REG = t2 */
++	fence
++
++    li t0, 0x8
++	csrrs t0, mstatus, t0
++
++    /* call the C dispatcher */
++	mv      a0, sp      /* argument 1, stack pointer */
++	mv      a1, s1      /* argument 2, interrupt number (mcause) */
++	/* mask off the interrupt flag of mcause */
++	li	    t0, 0x7fffffff
++	and     a1, a1, t0
++	jal     _global_interrupt_handler
++
++    li t0, 0x8
++	csrrc t0, mstatus, t0
++
++	/* restore the interrupt threshold level */
++	la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
++	sw s3, 0(t0)
++	fence
++
++    call  rt_interrupt_leave    /* 执行所有的中断函数后,调用该函数 */
++
++    /* 上面,将保存执行中断服务函数之前的CPU的sp寄存器到了s0所指向的位置处,当执行完中断服务函数,需要将之前的CPU寄存器,恢复一下,此时sp又变成了from thread的sp了 */
++    move  sp, s0    /* sp = s0 */
++
++    /* 下面两句话,相当于将 rt_thread_switch_interrupt_flag 值,赋值给了s2  */
++    /* 将 rt_thread_switch_interrupt_flag 的地址值,赋值给 s0 寄存器*/
++    la    s0, rt_thread_switch_interrupt_flag       /* s0 = &rt_thread_switch_interrupt_flag */
++    /* 将 s0 所指向的地址处的内容,取出来,赋值给 s2 寄存器,其实就是将  rt_thread_switch_interrupt_flag 的值,赋值给了 s2 寄存器*/
++    lw    s2, 0(s0)                 /* s2 = *s0 = rt_thread_switch_interrupt_flag */
++
++    /* 如果 s2的值,即 rt_thread_switch_interrupt_flag 值,如果不为0,则需要继续执行下一条指令,如果为0,则需要跳转到 spurious_interrupt 标号处 执行 */
++    /* 如果 s2的值等于0,rt_thread_switch_interrupt_flag等于0, 则不需要在中断处理函数中,进行上下文切换,反之则需要 */
++    /* 如果不需要上下文切换, */
++
++    /* 在这里,跳转到 spurious_interrupt的话,是不会进行上下文切换的,因为,此时CPU的sp指针还是from线程的*/
++    beqz  s2, spurious_interrupt    /* if (s2 == 0) goto spurious_interrupt; else 执行下一条语句*/
++
++    /* 需要上下文切换: 主要目的是将CPU的sp指针,赋值为to_thread的sp */
++    
++    /* 将 s0 所执向的地址的内容设置为0, 也就是,将变量 rt_thread_switch_interrupt_flag 赋值为了 0 */
++    /* s0存放的值是 rt_thread_switch_interrupt_flag 变量的地址*/
++    sw    zero, 0(s0)       /* *s0 = 0; 也就是 rt_thread_switch_interrupt_flag = 0 */
++    /* 将 mepc 的值,赋值给 a0 寄存器,mepc 的值是,跳转到中断函数执行之前的 PC 指针 */
++    /* 这时的mpec其实,还是from线程,在跳转到中断执行前的一个PC地址 */
++    csrr  a0, mepc  /* a0 = mepc */
++
++    /* 将 mpec 的值写回到freom thread任务栈中的 epc 中,待后续,恢复from线程时,使用 */
++    STORE a0, 0 * REGBYTES(sp)  /* from_thread->sp->epc = a0 ,中断入口处*/
++
++    /* 将from_thread的sp指针,赋值为CPU的sp指针 */
++    la    s0, rt_interrupt_from_thread  /* s0 = &rt_interrupt_from_thread 注意: rt_interrupt_from_thread = &(from_thread->sp) */
++    LOAD  s1, 0(s0)                     /* s1 = rt_interrupt_from_thread,也就是s1 = &(from_thread->sp) */
++    STORE sp, 0(s1)                     /* from_thread->sp = sp*/
++   
++    /* 接下来,需要开始恢复CPU的sp为to_thread的sp了 */
++    la    s0, rt_interrupt_to_thread    /* s0 = &rt_interrupt_to_thread 注意: rt_interrupt_to_thread = &(to_thred->sp)*/
++    LOAD  s1, 0(s0)                     /* s1 = rt_interrupt_to_thread, 也就是s1 = &(to_thred->sp) */
++    LOAD  sp, 0(s1)                     /* sp = (to_thred->sp)*/
++
++    /* 将CPU的 mepc设置为to_thred的mepc,待中断退出,执行mret指令后,将从该地址开始执行 */
++    LOAD  a0,  0 * REGBYTES(sp)         /* a0 = to_thread的mepc的值*/
++    csrw  mepc, a0                      /* mepc = a0 */
++
++
++spurious_interrupt:
++    LOAD  x1,   1 * REGBYTES(sp)
++
++    /* Remain in M-mode after mret */
++    li    t0, 0x00001800
++    csrs  mstatus, t0
++    LOAD  t0,   2 * REGBYTES(sp)
++    csrs  mstatus, t0
++
++    LOAD  x4,   4 * REGBYTES(sp)
++    LOAD  x5,   5 * REGBYTES(sp)
++    LOAD  x6,   6 * REGBYTES(sp)
++    LOAD  x7,   7 * REGBYTES(sp)
++    LOAD  x8,   8 * REGBYTES(sp)
++    LOAD  x9,   9 * REGBYTES(sp)
++    LOAD  x10, 10 * REGBYTES(sp)
++    LOAD  x11, 11 * REGBYTES(sp)
++    LOAD  x12, 12 * REGBYTES(sp)
++    LOAD  x13, 13 * REGBYTES(sp)
++    LOAD  x14, 14 * REGBYTES(sp)
++    LOAD  x15, 15 * REGBYTES(sp)
++    LOAD  x16, 16 * REGBYTES(sp)
++    LOAD  x17, 17 * REGBYTES(sp)
++    LOAD  x18, 18 * REGBYTES(sp)
++    LOAD  x19, 19 * REGBYTES(sp)
++    LOAD  x20, 20 * REGBYTES(sp)
++    LOAD  x21, 21 * REGBYTES(sp)
++    LOAD  x22, 22 * REGBYTES(sp)
++    LOAD  x23, 23 * REGBYTES(sp)
++    LOAD  x24, 24 * REGBYTES(sp)
++    LOAD  x25, 25 * REGBYTES(sp)
++    LOAD  x26, 26 * REGBYTES(sp)
++    LOAD  x27, 27 * REGBYTES(sp)
++    LOAD  x28, 28 * REGBYTES(sp)
++    LOAD  x29, 29 * REGBYTES(sp)
++    LOAD  x30, 30 * REGBYTES(sp)
++    LOAD  x31, 31 * REGBYTES(sp)
++
++    addi  sp, sp, 32 * REGBYTES
++    mret
++	.size  _interrupt_handler, .-_interrupt_handler
++#endif
+-- 
+2.32.0 (Apple Git-132)
+
+
+From d0d1f625543282df462af56cc18abaa5a47d4f40 Mon Sep 17 00:00:00 2001
+From: supperthomas <78900636@qq.com>
+Date: Sat, 9 Jul 2022 21:37:53 +0800
+Subject: [PATCH 2/4] remove submodule
+
+remove submodule
+---
+ .gitmodules | 111 ----------------------------------------------------
+ 1 file changed, 111 deletions(-)
+
+diff --git a/.gitmodules b/.gitmodules
+index 49edc68e10..8b13789179 100644
+--- a/.gitmodules
++++ b/.gitmodules
+@@ -1,112 +1 @@
+-#
+-# All the relative URL paths are intended to be GitHub ones
+-# For Espressif's public projects please use '../../espressif/proj', not a '../proj'
+-#
+ 
+-[submodule "components/esptool_py/esptool"]
+-	path = components/esptool_py/esptool
+-	url = ../../espressif/esptool.git
+-
+-[submodule "components/bt/controller/lib_esp32"]
+-	path = components/bt/controller/lib_esp32
+-        url = ../../espressif/esp32-bt-lib.git
+-
+-[submodule "components/bootloader/subproject/components/micro-ecc/micro-ecc"]
+-	path = components/bootloader/subproject/components/micro-ecc/micro-ecc
+-	url = ../../kmackay/micro-ecc.git
+-
+-[submodule "components/coap/libcoap"]
+-	path = components/coap/libcoap
+-	url = ../../obgm/libcoap.git
+-
+-[submodule "components/nghttp/nghttp2"]
+-	path = components/nghttp/nghttp2
+-	url = ../../nghttp2/nghttp2.git
+-
+-[submodule "components/libsodium/libsodium"]
+-	path = components/libsodium/libsodium
+-	url = ../../jedisct1/libsodium.git
+-
+-[submodule "components/spiffs/spiffs"]
+-	path = components/spiffs/spiffs
+-	url = ../../pellepl/spiffs.git
+-
+-[submodule "components/json/cJSON"]
+-	path = components/json/cJSON
+-	url = ../../DaveGamble/cJSON.git
+-
+-[submodule "components/mbedtls/mbedtls"]
+-	path = components/mbedtls/mbedtls
+-	url = ../../espressif/mbedtls.git
+-
+-[submodule "components/asio/asio"]
+-	path = components/asio/asio
+-	url = ../../espressif/asio.git
+-
+-[submodule "components/expat/expat"]
+-	path = components/expat/expat
+-	url = ../../libexpat/libexpat.git
+-
+-[submodule "components/lwip/lwip"]
+-	path = components/lwip/lwip
+-	url = ../../espressif/esp-lwip.git
+-
+-[submodule "components/mqtt/esp-mqtt"]
+-	path = components/mqtt/esp-mqtt
+-	url = ../../espressif/esp-mqtt.git
+-
+-[submodule "components/protobuf-c/protobuf-c"]
+-	path = components/protobuf-c/protobuf-c
+-	url = ../../protobuf-c/protobuf-c.git
+-
+-[submodule "components/unity/unity"]
+-	path = components/unity/unity
+-	url = ../../ThrowTheSwitch/Unity.git
+-
+-[submodule "examples/build_system/cmake/import_lib/main/lib/tinyxml2"]
+-	path = examples/build_system/cmake/import_lib/main/lib/tinyxml2
+-	url = ../../leethomason/tinyxml2.git
+-
+-[submodule "components/bt/host/nimble/nimble"]
+-	path = components/bt/host/nimble/nimble
+-	url = ../../espressif/esp-nimble.git
+-
+-[submodule "components/cbor/tinycbor"]
+-	path = components/cbor/tinycbor
+-	url = ../../intel/tinycbor.git
+-
+-[submodule "components/esp_wifi/lib"]
+-	path = components/esp_wifi/lib
+-	url = ../../espressif/esp32-wifi-lib.git
+-
+-[submodule "components/tinyusb/tinyusb"]
+-	path = components/tinyusb/tinyusb
+-	url = ../../espressif/tinyusb.git
+-
+-[submodule "examples/peripherals/secure_element/atecc608_ecdsa/components/esp-cryptoauthlib"]
+-	path = examples/peripherals/secure_element/atecc608_ecdsa/components/esp-cryptoauthlib
+-	url = ../../espressif/esp-cryptoauthlib.git
+-
+-[submodule "components/cmock/CMock"]
+-	path = components/cmock/CMock
+-	url = ../../ThrowTheSwitch/CMock.git
+-
+-[submodule "components/openthread/openthread"]
+-	path = components/openthread/openthread
+-	url = ../../espressif/openthread.git
+-
+-[submodule "components/bt/controller/lib_esp32c3_family"]
+-	path = components/bt/controller/lib_esp32c3_family
+-	url = ../../espressif/esp32c3-bt-lib.git
+-
+-[submodule "components/esp_phy/lib"]
+-	path = components/esp_phy/lib
+-	url = ../../espressif/esp-phy-lib.git
+-
+-[submodule "components/openthread/lib"]
+-	path = components/openthread/lib
+-	url = ../../espressif/esp-thread-lib.git
+-
+-[submodule "components/ieee802154/lib"]
+-	path = components/ieee802154/lib
+-	url = ../../espressif/esp-ieee802154-lib.git
+-- 
+2.32.0 (Apple Git-132)
+
+
+From 29b9d1ebe7fe1c817428b856e208561ae0dc574b Mon Sep 17 00:00:00 2001
+From: tangzz98 <tangz98@outlook.com>
+Date: Sat, 30 Jul 2022 15:08:12 +0800
+Subject: [PATCH 3/4] Add FreeRTOS wrapper
+
+---
+ .../include/esp_serial_slave_link/essl.h      |    2 +-
+ .../port/arch/riscv/expression_with_stack.c   |    6 +
+ components/esp_system/startup.c               |   12 +
+ components/freertos/CMakeLists.txt            |   73 +-
+ .../FreeRTOS/esp_additions/task_snapshot.c    |  212 ++
+ .../FreeRTOS/event_groups.c                   |  225 ++
+ .../FreeRTOS/freertos_v8_compat.c             |   33 +
+ .../esp_additions/freertos/FreeRTOSConfig.h   |  326 +++
+ .../esp_additions/freertos/task_snapshot.h    |   90 +
+ .../FreeRTOS/include/freertos/FreeRTOS.h      | 1198 +++++++++
+ .../FreeRTOS/include/freertos/event_groups.h  |  621 +++++
+ .../FreeRTOS/include/freertos/list.h          |  416 +++
+ .../FreeRTOS/include/freertos/portable.h      |  141 +
+ .../FreeRTOS/include/freertos/projdefs.h      |   64 +
+ .../FreeRTOS/include/freertos/queue.h         | 1188 +++++++++
+ .../FreeRTOS/include/freertos/semphr.h        | 1188 +++++++++
+ .../FreeRTOS/include/freertos/task.h          | 2265 +++++++++++++++++
+ .../FreeRTOS/include/freertos/timers.h        | 1185 +++++++++
+ .../FreeRTOS/list.c                           |  213 ++
+ .../FreeRTOS/port/MemMang/heap_1.c            |  145 ++
+ .../FreeRTOS/port/MemMang/heap_2.c            |  277 ++
+ .../FreeRTOS/port/MemMang/heap_3.c            |   78 +
+ .../FreeRTOS/port/MemMang/heap_4.c            |  447 ++++
+ .../FreeRTOS/port/MemMang/heap_5.c            |  506 ++++
+ .../FreeRTOS/port/port_common.c               |  203 ++
+ .../include/freertos/FreeRTOSConfig_arch.h    |  105 +
+ .../rt-thread/include/freertos/portmacro.h    |  107 +
+ .../include/freertos/portmacro_deprecated.h   |   94 +
+ .../include/freertos/portmacro_esp32c3.h      |  424 +++
+ .../FreeRTOS/port/rt-thread/port.c            |   44 +
+ .../FreeRTOS/port/rt-thread/port_esp32c3.c    |  197 ++
+ .../FreeRTOS/queue.c                          |  787 ++++++
+ .../FreeRTOS/tasks.c                          | 1254 +++++++++
+ .../FreeRTOS/timers.c                         |  328 +++
+ .../RT-Thread-wrapper-of-FreeRTOS/readme.md   |    3 +
+ 35 files changed, 14436 insertions(+), 21 deletions(-)
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/esp_additions/task_snapshot.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/event_groups.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/freertos_v8_compat.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/esp_additions/freertos/FreeRTOSConfig.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/esp_additions/freertos/task_snapshot.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/FreeRTOS.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/event_groups.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/list.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/portable.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/projdefs.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/queue.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/semphr.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/task.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/timers.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/list.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_1.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_2.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_3.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_4.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_5.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/port_common.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/FreeRTOSConfig_arch.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/portmacro.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/portmacro_deprecated.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/portmacro_esp32c3.h
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/port.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/port_esp32c3.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/queue.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/tasks.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/timers.c
+ create mode 100644 components/freertos/RT-Thread-wrapper-of-FreeRTOS/readme.md
+
+diff --git a/components/esp_serial_slave_link/include/esp_serial_slave_link/essl.h b/components/esp_serial_slave_link/include/esp_serial_slave_link/essl.h
+index f03274a401..e9bc4939c0 100644
+--- a/components/esp_serial_slave_link/include/esp_serial_slave_link/essl.h
++++ b/components/esp_serial_slave_link/include/esp_serial_slave_link/essl.h
+@@ -160,7 +160,7 @@ esp_err_t essl_read_reg(essl_handle_t handle, uint8_t add, uint8_t *value_o, uin
+  *        - ESP_ERR_NOT_SUPPORTED: Current device does not support this function.
+  *        - ESP_ERR_TIMEOUT:       No interrupts before timeout.
+  */
+-esp_err_t essl_wait_int(essl_handle_t handle, uint32_t wait_ms);
++esp_err_t essl_wait_int(essl_handle_t handle, TickType_t wait_ms);
+ 
+ /** Clear interrupt bits of ESSL slave. All the bits set in the mask will be cleared, while other bits will stay the same.
+  *
+diff --git a/components/esp_system/port/arch/riscv/expression_with_stack.c b/components/esp_system/port/arch/riscv/expression_with_stack.c
+index 07d22bf3aa..64c1e0689d 100644
+--- a/components/esp_system/port/arch/riscv/expression_with_stack.c
++++ b/components/esp_system/port/arch/riscv/expression_with_stack.c
+@@ -18,6 +18,7 @@
+ #include "freertos/FreeRTOS.h"
+ #include "freertos/portmacro.h"
+ 
++#if !defined CONFIG_IDF_RTOS_RTTHREAD
+ static portMUX_TYPE shared_stack_spinlock = portMUX_INITIALIZER_UNLOCKED;
+ static void *current_task_stack = NULL;
+ 
+@@ -45,10 +46,12 @@ static StackType_t *esp_switch_stack_setup(StackType_t *stack, size_t stack_size
+ #endif
+     return ((StackType_t *)adjusted_top_of_stack);
+ }
++#endif
+ 
+ 
+ void esp_execute_shared_stack_function(SemaphoreHandle_t lock, void *stack, size_t stack_size, shared_stack_function function)
+ {
++#if !defined CONFIG_IDF_RTOS_RTTHREAD
+     assert(lock);
+     assert(stack);
+     assert(stack_size > 0 && stack_size >= CONFIG_ESP_MINIMAL_SHARED_STACK_SIZE);
+@@ -70,4 +73,7 @@ void esp_execute_shared_stack_function(SemaphoreHandle_t lock, void *stack, size
+     portEXIT_CRITICAL(&shared_stack_spinlock);
+ 
+     xSemaphoreGive(lock);
++#else
++    function();
++#endif
+ }
+diff --git a/components/esp_system/startup.c b/components/esp_system/startup.c
+index 139ae8b6a2..6c6acdf575 100644
+--- a/components/esp_system/startup.c
++++ b/components/esp_system/startup.c
+@@ -56,6 +56,10 @@
+ 
+ #include "esp_rom_sys.h"
+ 
++#if CONFIG_IDF_RTOS_RTTHREAD
++#include "rtthread.h"
++#endif
++
+ // [refactor-todo] make this file completely target-independent
+ #if CONFIG_IDF_TARGET_ESP32
+ #include "esp32/clk.h"
+@@ -235,6 +239,14 @@ static void do_core_init(void)
+        app CPU, and when that is not up yet, the memory will be inaccessible and heap_caps_init may
+        fail initializing it properly. */
+     heap_caps_init();
++#if CONFIG_IDF_RTOS_RTTHREAD
++#if defined RT_USING_HEAP
++    extern int __heap_start__;
++    extern int __heap_end__;
++    rt_system_heap_init((void *)&__heap_start__, (void *)&__heap_end__);
++#endif
++    rt_system_scheduler_init();
++#endif
+ 
+     // When apptrace module is enabled, there will be SEGGER_SYSVIEW calls in the newlib init.
+     // SEGGER_SYSVIEW relies on apptrace module
+diff --git a/components/freertos/CMakeLists.txt b/components/freertos/CMakeLists.txt
+index bd5acf5a2f..8db7883df6 100644
+--- a/components/freertos/CMakeLists.txt
++++ b/components/freertos/CMakeLists.txt
+@@ -6,7 +6,29 @@ endif()
+ 
+ idf_build_get_property(target IDF_TARGET)
+ 
+-if(CONFIG_IDF_TARGET_ARCH_XTENSA)
++if(CONFIG_IDF_RTOS_RTTHREAD)
++    set(freertos_root "RT-Thread-wrapper-of-FreeRTOS/FreeRTOS")
++else()
++    set(freertos_root ".")
++endif()
++
++if(CONFIG_IDF_RTOS_RTTHREAD)
++    set(srcs
++        "${freertos_root}/port/rt-thread/port.c"
++        "${freertos_root}/port/rt-thread/port_esp32c3.c")
++
++    set(include_dirs
++        "${freertos_root}/include"
++        "${freertos_root}/include/esp_additions/freertos"  # For files with #include "FreeRTOSConfig.h"
++        "${freertos_root}/port/rt-thread/include"            # For including arch-specific FreeRTOSConfig_arch.h in port/<arch>/include
++        "${freertos_root}/include/esp_additions")          # For files with #include "freertos/FreeRTOSConfig.h"
++
++    set(private_include_dirs
++        "${freertos_root}/port/rt-thread/include/freertos"
++        "${freertos_root}/port/rt-thread"
++        "${freertos_root}")
++
++elseif(CONFIG_IDF_TARGET_ARCH_XTENSA)
+     set(srcs
+         "port/xtensa/port.c"
+         "port/xtensa/portasm.S"
+@@ -48,21 +70,25 @@ elseif(CONFIG_IDF_TARGET_ARCH_RISCV)
+ endif()
+ 
+ list(APPEND srcs
+-    "esp_additions/task_snapshot.c"
+-    "port/port_common.c"
+-    "port/port_systick.c"
+-    "croutine.c"
+-    "event_groups.c"
+-    "list.c"
+-    "queue.c"
+-    "tasks.c"
+-    "timers.c"
+-    "stream_buffer.c"
+-    "FreeRTOS-openocd.c"
+-    "freertos_v8_compat.c")
++    "${freertos_root}/port/port_common.c"
++    "${freertos_root}/event_groups.c"
++    "${freertos_root}/queue.c"
++    "${freertos_root}/tasks.c"
++    "${freertos_root}/timers.c"
++    "${freertos_root}/list.c"
++    "${freertos_root}/freertos_v8_compat.c"
++    "${freertos_root}/esp_additions/task_snapshot.c")
++
++if(NOT CONFIG_IDF_RTOS_RTTHREAD)
++    list(APPEND srcs
++        "port/port_systick.c"
++        "croutine.c"
++        "stream_buffer.c"
++        "freertos_v8_compat.c")
++endif()
+ 
+ list(APPEND private_include_dirs
+-    "include/freertos")
++    "${freertos_root}/include/freertos")
+ 
+ if(CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY)
+     list(APPEND srcs "port/xtensa/xtensa_loadstore_handler.S")
+@@ -71,7 +97,7 @@ endif()
+ # esp_timer is required by FreeRTOS because we use esp_tiemr_get_time() to do profiling
+ # app_trace is required by FreeRTOS headers only when CONFIG_APPTRACE_SV_ENABLE=y,
+ # REQUIRES can't depend on config options, so always require it.
+-set(required_components app_trace esp_timer)
++set(required_components app_trace esp_timer main)
+ 
+ idf_component_register(SRCS "${srcs}"
+                     INCLUDE_DIRS ${include_dirs}
+@@ -88,15 +114,22 @@ if(CONFIG_FREERTOS_DEBUG_OCDAWARE)
+ endif()
+ 
+ set_source_files_properties(
+-    tasks.c
+-    event_groups.c
+-    timers.c
+-    queue.c
+-    stream_buffer.c
++    "${freertos_root}/tasks.c"
++    "${freertos_root}/event_groups.c"
++    "${freertos_root}/timers.c"
++    "${freertos_root}/queue.c"
+     PROPERTIES COMPILE_DEFINITIONS
+     _ESP_FREERTOS_INTERNAL
+     )
+ 
++if(NOT CONFIG_IDF_RTOS_RTTHREAD)
++    set_source_files_properties(
++        stream_buffer.c
++        PROPERTIES COMPILE_DEFINITIONS
++        _ESP_FREERTOS_INTERNAL
++    )
++endif()
++
+ # The freertos component provides the `start_app` and `start_app_other_cores`
+ # if it is included in the build. It then calls `app_main`
+ # from the main task created, which must be provided by the user.
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/esp_additions/task_snapshot.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/esp_additions/task_snapshot.c
+new file mode 100644
+index 0000000000..1244118b60
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/esp_additions/task_snapshot.c
+@@ -0,0 +1,212 @@
++/*
++ * SPDX-FileCopyrightText: 2017-2021 Espressif Systems (Shanghai) CO LTD
++ *
++ * SPDX-License-Identifier: Apache-2.0
++ */
++
++#include "freertos/FreeRTOS.h"
++#include "freertos/task_snapshot.h"
++
++#ifndef DIM
++#define DIM(t) (sizeof(t)/ sizeof(*(t)))
++#endif
++
++#if ( configENABLE_TASK_SNAPSHOT == 1 )
++
++	static void prvTaskGetSnapshot( TaskSnapshot_t *pxTaskSnapshotArray, UBaseType_t *uxTask, void *pxTCB )
++	{
++		if (pxTCB == NULL) {
++			return;
++		}
++		pxTaskSnapshotArray[ *uxTask ].pxTCB = pxTCB;
++		pxTaskSnapshotArray[ *uxTask ].pxTopOfStack = (StackType_t *) pxTCBGetTopOfStack(pxTCB);
++		#if( portSTACK_GROWTH < 0 )
++		{
++			pxTaskSnapshotArray[ *uxTask ].pxEndOfStack = pxTCBGetEndOfStack(pxTCB);
++		}
++		#else
++		{
++			pxTaskSnapshotArray[ *uxTask ].pxEndOfStack = pxTCBGetStartOfStack(pxTCB);
++		}
++		#endif
++		(*uxTask)++;
++	}
++
++	static void prvTaskGetSnapshotsFromList( TaskSnapshot_t *pxTaskSnapshotArray, UBaseType_t *uxTask, const UBaseType_t uxArraySize, List_t *pxList )
++	{
++		void *pxNextTCB = NULL;
++		void *pxFirstTCB = NULL;
++
++		if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
++		{
++			listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );
++			do
++			{
++				if( *uxTask >= uxArraySize ) {
++					break;
++				}
++
++				listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );
++				prvTaskGetSnapshot( pxTaskSnapshotArray, uxTask, pxNextTCB );
++			} while( pxNextTCB != pxFirstTCB );
++		}
++		else
++		{
++			mtCOVERAGE_TEST_MARKER();
++		}
++	}
++
++	UBaseType_t uxTaskGetSnapshotAll( TaskSnapshot_t * const pxTaskSnapshotArray, const UBaseType_t uxArraySize, UBaseType_t * const pxTcbSz )
++	{
++		UBaseType_t uxTask = 0;
++		UBaseType_t i = 0;
++
++
++		*pxTcbSz = pxTCBGetSize();
++		/* Fill in a TaskSnapshot_t structure with information on each
++		task in the Ready state. */
++		i = configMAX_PRIORITIES;
++		do
++		{
++			i--;
++			prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, pxListGetReadyTask(i) );
++		} while( i > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
++
++		/* Fill in a TaskSnapshot_t structure with information on each
++		task in the Blocked state. */
++		prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, pxGetDelayedTaskList() );
++		prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, pxGetOverflowDelayedTaskList() );
++		for (i = 0; i < configNUM_CORES; i++) {
++			if( uxTask >= uxArraySize ) {
++				break;
++			}
++			prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, pxListGetReadyPendingTask(i) );
++		}
++
++		#if( INCLUDE_vTaskDelete == 1 )
++		{
++			prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, pxGetTasksWaitingTermination() );
++		}
++		#endif
++
++		#if ( INCLUDE_vTaskSuspend == 1 )
++		{
++			prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, pxGetSuspendedTaskList() );
++		}
++		#endif
++		return uxTask;
++	}
++
++	static void *prvFirstTaskGet( List_t *pxList )
++	{
++		ListItem_t *pxListItem = listGET_HEAD_ENTRY( pxList );
++		if( pxListItem != listGET_END_MARKER( pxList ) ) {
++			return listGET_LIST_ITEM_OWNER( pxListItem );
++		}
++		return NULL;
++	}
++
++	static void *prvNextTaskGet( void *pxTCB )
++	{
++		List_t *pxList = listLIST_ITEM_CONTAINER( pxTCBGetStateListItem(pxTCB) );
++		ListItem_t *pxListItem = listGET_NEXT( pxTCBGetStateListItem(pxTCB) );
++		if( pxListItem != listGET_END_MARKER( pxList ) ) {
++			return listGET_LIST_ITEM_OWNER( pxListItem );
++		}
++		return NULL;
++	}
++
++	void vTaskGetSnapshot( TaskHandle_t pxTask, TaskSnapshot_t *pxTaskSnapshot )
++	{
++		configASSERT( portVALID_TCB_MEM(pxTask) );
++		configASSERT( pxTaskSnapshot != NULL );
++		pxTaskSnapshot->pxTCB = (void*) pxTask;
++		pxTaskSnapshot->pxTopOfStack = pxTCBGetTopOfStack((void*) pxTask);
++		pxTaskSnapshot->pxEndOfStack = pxTCBGetEndOfStack((void*) pxTask);
++	}
++
++	TaskHandle_t pxTaskGetNext( TaskHandle_t pxTask )
++	{
++		void *pxTCB = pxTask;
++		List_t *pxTaskList = NULL;
++		UBaseType_t i = configMAX_PRIORITIES;
++		UBaseType_t bCurTaskListFound = pdFALSE;
++		List_t *task_lists[] = {
++			pxGetDelayedTaskList(),
++			pxGetOverflowDelayedTaskList(),
++		#if( INCLUDE_vTaskDelete == 1 )
++			pxGetTasksWaitingTermination(),
++		#endif
++		#if( INCLUDE_vTaskSuspend == 1 )
++			pxGetSuspendedTaskList()
++		#endif
++		};
++
++		if( pxTask != NULL && !portVALID_TCB_MEM(pxTask) ) {
++			return NULL;
++		}
++
++		if( pxTCB != NULL ) {
++			pxTCB = prvNextTaskGet( pxTCB );
++			if( pxTCB != NULL ) {
++				// take care not to return garbage
++				return portVALID_TCB_MEM(pxTCB) ? pxTCB : NULL;
++			}
++			pxTaskList = listLIST_ITEM_CONTAINER( pxTCBGetStateListItem(pxTask) );
++		}
++		/* ready tasks lists */
++		do
++		{
++			i--;
++			List_t *pxList = pxListGetReadyTask(i);
++			if( bCurTaskListFound == pdFALSE && pxTaskList != NULL ) {
++				/* need to find list the current task item from */
++				if( pxTaskList == pxList ) {
++					bCurTaskListFound = pdTRUE;
++				}
++				continue; /* go to the next 'ready list' */
++			}
++			pxTCB = prvFirstTaskGet( pxList );
++			if( pxTCB != NULL ) {
++				// take care not to return garbage
++				return portVALID_TCB_MEM(pxTCB) ? pxTCB : NULL;
++		}
++		}
++		while( i > tskIDLE_PRIORITY );
++		/* pending ready tasks lists */
++		for (i = 0; i < configNUM_CORES; i++) {
++			List_t *pxList = pxListGetReadyPendingTask(i);
++			if( bCurTaskListFound == pdFALSE && pxTaskList != NULL ) {
++				/* need to find list the current task item from */
++				if( pxTaskList == pxList ) {
++					bCurTaskListFound = pdTRUE;
++				}
++				continue; /* go to the next 'ready list' */
++			}
++			pxTCB = prvFirstTaskGet( pxList );
++			if( pxTCB != NULL ) {
++				// take care not to return garbage
++				return portVALID_TCB_MEM(pxTCB) ? pxTCB : NULL;
++			}
++		}
++		/* other tasks lists */
++		for (i = 0; i < DIM(task_lists); i++) {
++			List_t *pxList = task_lists[ i ];
++			if( bCurTaskListFound == pdFALSE && pxTaskList != NULL ) {
++				/* need to find list the current task item from */
++				if( pxTaskList == pxList ) {
++					bCurTaskListFound = pdTRUE;
++				}
++				continue; /* go to the next 'ready list' */
++			}
++			pxTCB = prvFirstTaskGet( pxList );
++			if( pxTCB != NULL ) {
++				// take care not to return garbage
++				return portVALID_TCB_MEM(pxTCB) ? pxTCB : NULL;
++			}
++		}
++
++		return NULL;
++	}
++
++#endif
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/event_groups.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/event_groups.c
+new file mode 100644
+index 0000000000..1b708564ed
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/event_groups.c
+@@ -0,0 +1,225 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++/* Standard includes. */
++#include <stdlib.h>
++
++/* FreeRTOS includes. */
++#include "FreeRTOS.h"
++#include "task.h"
++#include "event_groups.h"
++
++typedef struct EventGroupDef_t
++{
++    struct rt_event event;
++} EventGroup_t;
++
++static volatile rt_uint8_t event_index = 0;
++
++/*-----------------------------------------------------------*/
++
++#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++
++    EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer )
++    {
++        char name[RT_NAME_MAX] = {0};
++
++        /* A StaticEventGroup_t object must be provided. */
++        configASSERT( pxEventGroupBuffer );
++
++        rt_snprintf( name, RT_NAME_MAX, "event%02d", event_index++ );
++        rt_event_init( ( rt_event_t ) pxEventGroupBuffer, name, RT_IPC_FLAG_PRIO );
++
++        return ( EventGroupHandle_t ) pxEventGroupBuffer;
++    }
++
++#endif /* configSUPPORT_STATIC_ALLOCATION */
++/*-----------------------------------------------------------*/
++
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++
++    EventGroupHandle_t xEventGroupCreate( void )
++    {
++        EventGroup_t * pxEventBits;
++        char name[RT_NAME_MAX] = {0};
++
++        rt_snprintf( name, RT_NAME_MAX, "event%02d", event_index++ );
++        pxEventBits = ( EventGroup_t * ) rt_event_create( name, RT_IPC_FLAG_PRIO );
++
++        return pxEventBits;
++    }
++
++#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
++/*-----------------------------------------------------------*/
++
++EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
++                                 const EventBits_t uxBitsToWaitFor,
++                                 const BaseType_t xClearOnExit,
++                                 const BaseType_t xWaitForAllBits,
++                                 TickType_t xTicksToWait )
++{
++    rt_event_t event = ( rt_event_t ) xEventGroup;
++    rt_uint8_t option = 0;
++    rt_uint32_t recved;
++    rt_base_t level;
++    rt_err_t err;
++
++    /* Check the user is not attempting to wait on the bits used by the kernel
++     * itself, and that at least one bit is being requested. */
++    configASSERT( xEventGroup );
++    configASSERT( uxBitsToWaitFor != 0 );
++    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
++        {
++            configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
++        }
++    #endif
++
++    if ( xWaitForAllBits != pdFALSE )
++    {
++        option |= RT_EVENT_FLAG_AND;
++    }
++    else
++    {
++        option |= RT_EVENT_FLAG_OR;
++    }
++    if ( xClearOnExit != pdFALSE )
++    {
++        option |= RT_EVENT_FLAG_CLEAR;
++    }
++    err = rt_event_recv( event, ( rt_uint32_t ) uxBitsToWaitFor, option, ( rt_int32_t ) xTicksToWait, &recved );
++
++    if ( err != RT_EOK )
++    {
++        level = rt_hw_interrupt_disable();
++        recved = event->set;
++        rt_hw_interrupt_enable(level);
++    }
++
++    return ( EventBits_t ) recved;
++}
++/*-----------------------------------------------------------*/
++
++EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
++                                  const EventBits_t uxBitsToClear )
++{
++    rt_event_t event = ( rt_event_t ) xEventGroup;
++    EventBits_t uxReturn;
++    rt_base_t level;
++
++    configASSERT( xEventGroup );
++
++    level = rt_hw_interrupt_disable();
++    uxReturn = ( EventBits_t ) event->set;
++    event->set &= ~( ( rt_uint32_t ) uxBitsToClear );
++    rt_hw_interrupt_enable( level );
++
++    return uxReturn;
++}
++/*-----------------------------------------------------------*/
++
++BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup,
++                                        const EventBits_t uxBitsToClear )
++{
++    return xEventGroupClearBits( xEventGroup, uxBitsToClear );
++}
++
++EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
++{
++    rt_event_t event = ( rt_event_t ) xEventGroup;
++    EventBits_t uxReturn;
++    rt_base_t level;
++
++    level = rt_hw_interrupt_disable();
++    uxReturn = ( EventBits_t ) event->set;
++    rt_hw_interrupt_enable( level );
++
++    return uxReturn;
++}
++/*-----------------------------------------------------------*/
++
++EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
++                                const EventBits_t uxBitsToSet )
++{
++    rt_event_t event = ( rt_event_t ) xEventGroup;
++    rt_base_t level;
++    EventBits_t uxReturn;
++
++    configASSERT( xEventGroup );
++
++    rt_event_send( event, ( rt_uint32_t ) uxBitsToSet);
++
++    level = rt_hw_interrupt_disable();
++    uxReturn = ( EventBits_t ) event->set;
++    rt_hw_interrupt_enable(level);
++
++    return uxReturn;
++}
++/*-----------------------------------------------------------*/
++
++void vEventGroupDelete( EventGroupHandle_t xEventGroup )
++{
++    rt_event_t event = ( rt_event_t ) xEventGroup;
++
++    configASSERT( xEventGroup );
++
++#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
++    if ( rt_object_is_systemobject( ( rt_object_t ) event ) )
++#endif
++    {
++    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++        rt_event_detach( event );
++    #endif
++#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
++    }
++    else
++    {
++#endif
++    #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++        rt_event_delete( event );
++    #endif
++    }
++}
++/*-----------------------------------------------------------*/
++
++#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )
++
++    BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup,
++                                          const EventBits_t uxBitsToSet,
++                                          BaseType_t * pxHigherPriorityTaskWoken )
++    {
++        xEventGroupSetBits( xEventGroup, uxBitsToSet );
++        if ( pxHigherPriorityTaskWoken != NULL)
++        {
++            *pxHigherPriorityTaskWoken = pdFALSE;
++        }
++
++        return pdPASS;
++    }
++
++#endif /* if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) */
++/*-----------------------------------------------------------*/
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/freertos_v8_compat.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/freertos_v8_compat.c
+new file mode 100644
+index 0000000000..fe8d689125
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/freertos_v8_compat.c
+@@ -0,0 +1,33 @@
++// Copyright 2020 Espressif Systems (Shanghai) Co., Ltd.
++//
++// Licensed under the Apache License, Version 2.0 (the "License");
++// you may not use this file except in compliance with the License.
++// You may obtain a copy of the License at
++//
++//     http://www.apache.org/licenses/LICENSE-2.0
++//
++// Unless required by applicable law or agreed to in writing, software
++// distributed under the License is distributed on an "AS IS" BASIS,
++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++// See the License for the specific language governing permissions and
++// limitations under the License.
++
++#include "FreeRTOS.h"
++#include "queue.h"
++#include "semphr.h"
++
++/* This API is kept for backward ABI compatibility with prebuilt libraries against FreeRTOS v8/v9 in ESP-IDF */
++BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xPeek )
++{
++	if ( xPeek == pdTRUE )
++	{
++		return xQueuePeek( xQueue, pvBuffer, xTicksToWait );
++	}
++
++	if ( pvBuffer == NULL )
++	{
++		return xQueueSemaphoreTake( xQueue, xTicksToWait );
++	}
++
++	return xQueueReceive( xQueue, pvBuffer, xTicksToWait );
++}
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/esp_additions/freertos/FreeRTOSConfig.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/esp_additions/freertos/FreeRTOSConfig.h
+new file mode 100644
+index 0000000000..8a4739a3a5
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/esp_additions/freertos/FreeRTOSConfig.h
+@@ -0,0 +1,326 @@
++/*
++    FreeRTOS V10 - Copyright (C) 2021 Real Time Engineers Ltd.
++    All rights reserved
++
++    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
++
++    This file is part of the FreeRTOS distribution.
++
++    FreeRTOS is free software; you can redistribute it and/or modify it under
++    the terms of the GNU General Public License (version 2) as published by the
++    Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
++
++	***************************************************************************
++    >>!   NOTE: The modification to the GPL is included to allow you to     !<<
++    >>!   distribute a combined work that includes FreeRTOS without being   !<<
++    >>!   obliged to provide the source code for proprietary components     !<<
++    >>!   outside of the FreeRTOS kernel.                                   !<<
++	***************************************************************************
++
++    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
++    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++    FOR A PARTICULAR PURPOSE.  Full license text is available on the following
++    link: http://www.freertos.org/a00114.html
++
++    ***************************************************************************
++     *                                                                       *
++     *    FreeRTOS provides completely free yet professionally developed,    *
++     *    robust, strictly quality controlled, supported, and cross          *
++     *    platform software that is more than just the market leader, it     *
++     *    is the industry's de facto standard.                               *
++     *                                                                       *
++     *    Help yourself get started quickly while simultaneously helping     *
++     *    to support the FreeRTOS project by purchasing a FreeRTOS           *
++     *    tutorial book, reference manual, or both:                          *
++     *    http://www.FreeRTOS.org/Documentation                              *
++     *                                                                       *
++    ***************************************************************************
++
++    http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading
++	the FAQ page "My application does not run, what could be wrong?".  Have you
++	defined configASSERT()?
++
++	http://www.FreeRTOS.org/support - In return for receiving this top quality
++	embedded software for free we request you assist our global community by
++	participating in the support forum.
++
++	http://www.FreeRTOS.org/training - Investing in training allows your team to
++	be as productive as possible as early as possible.  Now you can receive
++	FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
++	Ltd, and the world's leading authority on the world's leading RTOS.
++
++    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
++    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
++    compatible FAT file system, and our tiny thread aware UDP/IP stack.
++
++    http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
++    Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
++
++    http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
++    Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
++    licenses offer ticketed support, indemnification and commercial middleware.
++
++    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
++    engineered and independently SIL3 certified version for use in safety and
++    mission critical applications that require provable dependability.
++
++    1 tab == 4 spaces!
++*/
++
++#ifndef FREERTOS_CONFIG_H
++#define FREERTOS_CONFIG_H
++
++#include "sdkconfig.h"
++
++/* for likely and unlikely */
++#include "esp_compiler.h"
++
++// The arch-specific FreeRTOSConfig_arch.h in port/<arch>/include.
++#include "freertos/FreeRTOSConfig_arch.h"
++
++#if !(defined(FREERTOS_CONFIG_XTENSA_H) \
++        || defined(FREERTOS_CONFIG_RISCV_H) \
++        || defined(FREERTOS_CONFIG_LINUX_H))
++#error "Needs architecture-speific FreeRTOSConfig.h!"
++#endif
++
++#ifndef CONFIG_FREERTOS_UNICORE
++#define portNUM_PROCESSORS                              2
++#else
++#define portNUM_PROCESSORS                              1
++#endif
++
++#define portUSING_MPU_WRAPPERS                          0
++#define configUSE_MUTEX                                 1
++
++#define configNUM_THREAD_LOCAL_STORAGE_POINTERS CONFIG_FREERTOS_THREAD_LOCAL_STORAGE_POINTERS
++#define configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS     1
++
++/* configASSERT behaviour */
++#ifndef __ASSEMBLER__
++#include <assert.h>
++
++// If CONFIG_FREERTOS_ASSERT_DISABLE is set then configASSERT is defined empty later in FreeRTOS.h and the macro
++// configASSERT_DEFINED remains unset (meaning some warnings are avoided)
++
++#if defined(CONFIG_FREERTOS_ASSERT_FAIL_PRINT_CONTINUE)
++#define configASSERT(a) if (unlikely(!(a))) {                               \
++        esp_rom_printf("%s:%d (%s)- assert failed!\n", __FILE__, __LINE__,  \
++                   __FUNCTION__);                                           \
++    }
++#elif defined(CONFIG_FREERTOS_ASSERT_FAIL_ABORT)
++#define configASSERT(a) assert(a)
++#endif
++
++#if CONFIG_FREERTOS_ASSERT_ON_UNTESTED_FUNCTION
++#define UNTESTED_FUNCTION() { esp_rom_printf("Untested FreeRTOS function %s\r\n", __FUNCTION__); configASSERT(false); } while(0)
++#else
++#define UNTESTED_FUNCTION()
++#endif
++
++#endif /* def __ASSEMBLER__ */
++
++/*-----------------------------------------------------------
++ * Application specific definitions.
++ *
++ * These definitions should be adjusted for your particular hardware and
++ * application requirements.
++ *
++ * Note that the default heap size is deliberately kept small so that
++ * the build is more likely to succeed for configurations with limited
++ * memory.
++ *
++ * THESE PARAMETERS ARE DESCRIBED WITHIN THE 'CONFIGURATION' SECTION OF THE
++ * FreeRTOS API DOCUMENTATION AVAILABLE ON THE FreeRTOS.org WEB SITE.
++ *----------------------------------------------------------*/
++
++#define configUSE_PREEMPTION                            1
++#define configUSE_IDLE_HOOK                             1
++#define configUSE_TICK_HOOK                             1
++#define configRECORD_STACK_HIGH_ADDRESS                 1
++#define configTICK_RATE_HZ                              ( CONFIG_FREERTOS_HZ )
++
++/* This has impact on speed of search for highest priority */
++#define configMAX_PRIORITIES                            ( 32 )
++
++/* Various things that impact minimum stack sizes */
++
++/* Higher stack checker modes cause overhead on each function call */
++#if CONFIG_STACK_CHECK_ALL || CONFIG_STACK_CHECK_STRONG
++#define configSTACK_OVERHEAD_CHECKER                    256
++#else
++#define configSTACK_OVERHEAD_CHECKER                    0
++#endif
++
++/* with optimizations disabled, scheduler uses additional stack */
++#if CONFIG_COMPILER_OPTIMIZATION_NONE
++#define configSTACK_OVERHEAD_OPTIMIZATION               320
++#else
++#define configSTACK_OVERHEAD_OPTIMIZATION               0
++#endif
++
++/* apptrace module increases minimum stack usage */
++#if CONFIG_APPTRACE_ENABLE
++#define configSTACK_OVERHEAD_APPTRACE                   1280
++#else
++#define configSTACK_OVERHEAD_APPTRACE                   0
++#endif
++
++/* Stack watchpoint decreases minimum usable stack size by up to 60 bytes.
++   See FreeRTOS FREERTOS_WATCHPOINT_END_OF_STACK option in Kconfig. */
++#if CONFIG_FREERTOS_WATCHPOINT_END_OF_STACK
++#define configSTACK_OVERHEAD_WATCHPOINT                   60
++#else
++#define configSTACK_OVERHEAD_WATCHPOINT                   0
++#endif
++
++#define configSTACK_OVERHEAD_TOTAL (                                    \
++                                    configSTACK_OVERHEAD_CHECKER +      \
++                                    configSTACK_OVERHEAD_OPTIMIZATION + \
++                                    configSTACK_OVERHEAD_APPTRACE +     \
++                                    configSTACK_OVERHEAD_WATCHPOINT     \
++                                                                        )
++
++#define configMINIMAL_STACK_SIZE                        (768 + configSTACK_OVERHEAD_TOTAL)
++
++#ifndef configIDLE_TASK_STACK_SIZE
++#define configIDLE_TASK_STACK_SIZE CONFIG_FREERTOS_IDLE_TASK_STACKSIZE
++#endif
++
++/* Minimal heap size to make sure examples can run on memory limited
++   configs. Adjust this to suit your system. */
++
++
++//We define the heap to span all of the non-statically-allocated shared RAM. ToDo: Make sure there
++//is some space left for the app and main cpu when running outside of a thread.
++#define configAPPLICATION_ALLOCATED_HEAP                1
++#define configTOTAL_HEAP_SIZE                           (&_heap_end - &_heap_start)//( ( size_t ) (64 * 1024) )
++
++#define configMAX_TASK_NAME_LEN                         ( CONFIG_FREERTOS_MAX_TASK_NAME_LEN )
++
++#ifdef CONFIG_FREERTOS_USE_TRACE_FACILITY
++#define configUSE_TRACE_FACILITY                        1       /* Used by uxTaskGetSystemState(), and other trace facility functions */
++#endif
++
++#ifdef CONFIG_FREERTOS_USE_STATS_FORMATTING_FUNCTIONS
++#define configUSE_STATS_FORMATTING_FUNCTIONS            1   /* Used by vTaskList() */
++#endif
++
++#ifdef CONFIG_FREERTOS_VTASKLIST_INCLUDE_COREID
++#define configTASKLIST_INCLUDE_COREID                   1
++#endif
++
++#ifdef CONFIG_FREERTOS_GENERATE_RUN_TIME_STATS
++#define configGENERATE_RUN_TIME_STATS                   1       /* Used by vTaskGetRunTimeStats() */
++#endif
++
++#define configBENCHMARK                                 0
++#define configUSE_16_BIT_TICKS                          0
++#define configIDLE_SHOULD_YIELD                         0
++#define configQUEUE_REGISTRY_SIZE                       CONFIG_FREERTOS_QUEUE_REGISTRY_SIZE
++
++#define configUSE_MUTEXES                               1
++#define configUSE_RECURSIVE_MUTEXES                     1
++#define configUSE_COUNTING_SEMAPHORES                   1
++
++#if CONFIG_FREERTOS_CHECK_STACKOVERFLOW_NONE
++#define configCHECK_FOR_STACK_OVERFLOW                  0
++#elif CONFIG_FREERTOS_CHECK_STACKOVERFLOW_PTRVAL
++#define configCHECK_FOR_STACK_OVERFLOW                  1
++#elif CONFIG_FREERTOS_CHECK_STACKOVERFLOW_CANARY
++#define configCHECK_FOR_STACK_OVERFLOW                  2
++#endif
++
++
++/* Co-routine definitions. */
++#define configUSE_CO_ROUTINES                           0
++#define configMAX_CO_ROUTINE_PRIORITIES                 ( 2 )
++
++/* Set the following definitions to 1 to include the API function, or zero
++   to exclude the API function. */
++
++#define INCLUDE_vTaskPrioritySet                        1
++#define INCLUDE_uxTaskPriorityGet                       1
++#define INCLUDE_vTaskDelete                             1
++#define INCLUDE_vTaskCleanUpResources                   0
++#define INCLUDE_vTaskSuspend                            1
++#define INCLUDE_vTaskDelayUntil                         1
++#define INCLUDE_vTaskDelay                              1
++#define INCLUDE_uxTaskGetStackHighWaterMark             1
++#define INCLUDE_pcTaskGetTaskName                       1
++#define INCLUDE_xTaskGetIdleTaskHandle                  1
++#define INCLUDE_pxTaskGetStackStart                     1
++#define INCLUDE_eTaskGetState                           1
++#define INCLUDE_xTaskAbortDelay                         1
++#define INCLUDE_xTaskGetHandle                          1
++#define INCLUDE_xSemaphoreGetMutexHolder                1
++#define INCLUDE_xTimerPendFunctionCall                  1
++#define INCLUDE_xTimerGetTimerDaemonTaskHandle          0   //Currently there is no need for this API
++
++/* The priority at which the tick interrupt runs.  This should probably be
++   kept at 1. */
++#define configKERNEL_INTERRUPT_PRIORITY                 1
++
++#if !CONFIG_IDF_TARGET_LINUX
++#define configUSE_NEWLIB_REENTRANT                      1
++#endif
++
++#define configSUPPORT_DYNAMIC_ALLOCATION                1
++#define configSUPPORT_STATIC_ALLOCATION                 1
++
++#ifndef __ASSEMBLER__
++#if CONFIG_FREERTOS_ENABLE_STATIC_TASK_CLEAN_UP
++extern void vPortCleanUpTCB ( void *pxTCB );
++#define portCLEAN_UP_TCB( pxTCB )           vPortCleanUpTCB( pxTCB )
++#endif
++#endif
++
++/* Test FreeRTOS timers (with timer task) and more. */
++/* Some files don't compile if this flag is disabled */
++#define configUSE_TIMERS                                1
++#define configTIMER_TASK_PRIORITY                       CONFIG_FREERTOS_TIMER_TASK_PRIORITY
++#define configTIMER_QUEUE_LENGTH                        CONFIG_FREERTOS_TIMER_QUEUE_LENGTH
++#define configTIMER_TASK_STACK_DEPTH                    CONFIG_FREERTOS_TIMER_TASK_STACK_DEPTH
++
++#define configUSE_QUEUE_SETS                            1
++
++#define configUSE_TICKLESS_IDLE                         CONFIG_FREERTOS_USE_TICKLESS_IDLE
++#if configUSE_TICKLESS_IDLE
++#define configEXPECTED_IDLE_TIME_BEFORE_SLEEP           CONFIG_FREERTOS_IDLE_TIME_BEFORE_SLEEP
++#endif //configUSE_TICKLESS_IDLE
++
++
++#if CONFIG_FREERTOS_ENABLE_TASK_SNAPSHOT
++#define configENABLE_TASK_SNAPSHOT                      1
++#endif
++#ifndef configENABLE_TASK_SNAPSHOT
++#define configENABLE_TASK_SNAPSHOT                      0
++#endif
++
++#if CONFIG_SYSVIEW_ENABLE
++#ifndef __ASSEMBLER__
++#include "SEGGER_SYSVIEW_FreeRTOS.h"
++#undef INLINE // to avoid redefinition
++#endif /* def __ASSEMBLER__ */
++#endif
++
++#if CONFIG_FREERTOS_CHECK_MUTEX_GIVEN_BY_OWNER
++#define configCHECK_MUTEX_GIVEN_BY_OWNER                1
++#else
++#define configCHECK_MUTEX_GIVEN_BY_OWNER                0
++#endif
++
++
++#define configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H       1
++
++#define configTASK_NOTIFICATION_ARRAY_ENTRIES           1
++
++// backward compatibility for 4.4
++#define xTaskRemoveFromUnorderedEventList vTaskRemoveFromUnorderedEventList
++
++#define configNUM_CORES                                 portNUM_PROCESSORS
++
++/* RT-Thread wrapper */
++#define INCLUDE_xTaskGetCurrentTaskHandle   1
++
++#endif /* FREERTOS_CONFIG_H */
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/esp_additions/freertos/task_snapshot.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/esp_additions/freertos/task_snapshot.h
+new file mode 100644
+index 0000000000..1ad04cce69
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/esp_additions/freertos/task_snapshot.h
+@@ -0,0 +1,90 @@
++// Copyright 2015-2021 Espressif Systems (Shanghai) PTE LTD
++//
++// Licensed under the Apache License, Version 2.0 (the "License");
++// you may not use this file except in compliance with the License.
++// You may obtain a copy of the License at
++//
++//     http://www.apache.org/licenses/LICENSE-2.0
++//
++// Unless required by applicable law or agreed to in writing, software
++// distributed under the License is distributed on an "AS IS" BASIS,
++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++// See the License for the specific language governing permissions and
++// limitations under the License.
++
++#pragma once
++
++#include "freertos/FreeRTOS.h"
++#include "freertos/task.h"
++
++#if ( configENABLE_TASK_SNAPSHOT == 1 )
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/**
++ * Check `freertos_tasks_c_additions.h` file for more info
++ * about these functions declaration.
++ */
++UBaseType_t pxTCBGetSize ( void );
++ListItem_t*	pxTCBGetStateListItem ( void *pxTCB );
++StackType_t* pxTCBGetStartOfStack ( void *pxTCB );
++StackType_t* pxTCBGetTopOfStack ( void *pxTCB );
++StackType_t* pxTCBGetEndOfStack ( void *pxTCB );
++List_t* pxListGetReadyTask ( UBaseType_t idx );
++List_t* pxListGetReadyPendingTask ( UBaseType_t idx );
++List_t* pxGetDelayedTaskList ( void );
++List_t* pxGetOverflowDelayedTaskList ( void );
++List_t* pxGetTasksWaitingTermination ( void );
++List_t* pxGetSuspendedTaskList ( void );
++
++/**
++ * Used with the uxTaskGetSnapshotAll() function to save memory snapshot of each task in the system.
++ * We need this struct because TCB_t is defined (hidden) in tasks.c.
++ */
++typedef struct xTASK_SNAPSHOT
++{
++	void        *pxTCB;         /*!< Address of task control block. */
++	StackType_t *pxTopOfStack;  /*!< Points to the location of the last item placed on the tasks stack. */
++	StackType_t *pxEndOfStack;  /*!< Points to the end of the stack. pxTopOfStack < pxEndOfStack, stack grows hi2lo
++									pxTopOfStack > pxEndOfStack, stack grows lo2hi*/
++} TaskSnapshot_t;
++
++
++/*
++ * This function fills array with TaskSnapshot_t structures for every task in the system.
++ * Used by panic handling code to get snapshots of all tasks in the system.
++ * Only available when configENABLE_TASK_SNAPSHOT is set to 1.
++ * @param pxTaskSnapshotArray Pointer to array of TaskSnapshot_t structures to store tasks snapshot data.
++ * @param uxArraySize Size of tasks snapshots array.
++ * @param pxTcbSz Pointer to store size of TCB.
++ * @return Number of elements stored in array.
++ */
++UBaseType_t uxTaskGetSnapshotAll( TaskSnapshot_t * const pxTaskSnapshotArray, const UBaseType_t uxArraySize, UBaseType_t * const pxTcbSz );
++
++/*
++ * This function iterates over all tasks in the system.
++ * Used by panic handling code to iterate over tasks in the system.
++ * Only available when configENABLE_TASK_SNAPSHOT is set to 1.
++ * @note This function should not be used while FreeRTOS is running (as it doesn't acquire any locks).
++ * @param pxTask task handle.
++ * @return Handle for the next task. If pxTask is NULL, returns handle for the first task.
++ */
++TaskHandle_t pxTaskGetNext( TaskHandle_t pxTask );
++
++/*
++ * This function fills TaskSnapshot_t structure for specified task.
++ * Used by panic handling code to get snapshot of a task.
++ * Only available when configENABLE_TASK_SNAPSHOT is set to 1.
++ * @note This function should not be used while FreeRTOS is running (as it doesn't acquire any locks).
++ * @param pxTask task handle.
++ * @param pxTaskSnapshot address of TaskSnapshot_t structure to fill.
++ */
++void vTaskGetSnapshot( TaskHandle_t pxTask, TaskSnapshot_t *pxTaskSnapshot );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/FreeRTOS.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/FreeRTOS.h
+new file mode 100644
+index 0000000000..b3efa13f20
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/FreeRTOS.h
+@@ -0,0 +1,1198 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++#ifndef INC_FREERTOS_H
++#define INC_FREERTOS_H
++
++/*
++ * Include the generic headers required for the FreeRTOS port being used.
++ */
++#include <stddef.h>
++
++/*
++ * If stdint.h cannot be located then:
++ *   + If using GCC ensure the -nostdint option is *not* being used.
++ *   + Ensure the project's include path includes the directory in which your
++ *     compiler stores stdint.h.
++ *   + Set any compiler options necessary for it to support C99, as technically
++ *     stdint.h is only mandatory with C99 (FreeRTOS does not require C99 in any
++ *     other way).
++ *   + The FreeRTOS download includes a simple stdint.h definition that can be
++ *     used in cases where none is provided by the compiler.  The files only
++ *     contains the typedefs required to build FreeRTOS.  Read the instructions
++ *     in FreeRTOS/source/stdint.readme for more information.
++ */
++#include <stdint.h> /* READ COMMENT ABOVE. */
++
++/* *INDENT-OFF* */
++#ifdef __cplusplus
++    extern "C" {
++#endif
++/* *INDENT-ON* */
++
++#include <rtthread.h>
++#include <rthw.h>
++
++/* Application specific configuration options. */
++#include "FreeRTOSConfig.h"
++
++/* Basic FreeRTOS definitions. */
++#include "projdefs.h"
++
++/* Definitions specific to the port being used. */
++#include "portable.h"
++
++/* Must be defaulted before configUSE_NEWLIB_REENTRANT is used below. */
++#ifndef configUSE_NEWLIB_REENTRANT
++    #define configUSE_NEWLIB_REENTRANT    0
++#endif
++
++/* Required if struct _reent is used. */
++#if ( configUSE_NEWLIB_REENTRANT == 1 )
++    #include <reent.h>
++#endif
++
++/*
++ * Check all the required application specific macros have been defined.
++ * These macros are application specific and (as downloaded) are defined
++ * within FreeRTOSConfig.h.
++ */
++
++#ifndef configMINIMAL_STACK_SIZE
++    #error Missing definition:  configMINIMAL_STACK_SIZE must be defined in FreeRTOSConfig.h.  configMINIMAL_STACK_SIZE defines the size (in words) of the stack allocated to the idle task.  Refer to the demo project provided for your port for a suitable value.
++#endif
++
++#ifndef configMAX_PRIORITIES
++    #error Missing definition:  configMAX_PRIORITIES must be defined in FreeRTOSConfig.h.  See the Configuration section of the FreeRTOS API documentation for details.
++#endif
++
++#if configMAX_PRIORITIES < 1
++    #error configMAX_PRIORITIES must be defined to be greater than or equal to 1.
++#endif
++
++#ifndef configUSE_PREEMPTION
++    #error Missing definition:  configUSE_PREEMPTION must be defined in FreeRTOSConfig.h as either 1 or 0.  See the Configuration section of the FreeRTOS API documentation for details.
++#endif
++
++#ifndef configUSE_IDLE_HOOK
++    #error Missing definition:  configUSE_IDLE_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0.  See the Configuration section of the FreeRTOS API documentation for details.
++#endif
++
++#ifndef configUSE_TICK_HOOK
++    #error Missing definition:  configUSE_TICK_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0.  See the Configuration section of the FreeRTOS API documentation for details.
++#endif
++
++#ifndef configUSE_16_BIT_TICKS
++    #error Missing definition:  configUSE_16_BIT_TICKS must be defined in FreeRTOSConfig.h as either 1 or 0.  See the Configuration section of the FreeRTOS API documentation for details.
++#endif
++
++#ifndef configUSE_CO_ROUTINES
++    #define configUSE_CO_ROUTINES    0
++#endif
++
++#ifndef INCLUDE_vTaskPrioritySet
++    #define INCLUDE_vTaskPrioritySet    0
++#endif
++
++#ifndef INCLUDE_uxTaskPriorityGet
++    #define INCLUDE_uxTaskPriorityGet    0
++#endif
++
++#ifndef INCLUDE_vTaskDelete
++    #define INCLUDE_vTaskDelete    0
++#endif
++
++#ifndef INCLUDE_vTaskSuspend
++    #define INCLUDE_vTaskSuspend    0
++#endif
++
++#ifdef INCLUDE_xTaskDelayUntil
++    #ifdef INCLUDE_vTaskDelayUntil
++
++/* INCLUDE_vTaskDelayUntil was replaced by INCLUDE_xTaskDelayUntil.  Backward
++ * compatibility is maintained if only one or the other is defined, but
++ * there is a conflict if both are defined. */
++        #error INCLUDE_vTaskDelayUntil and INCLUDE_xTaskDelayUntil are both defined.  INCLUDE_vTaskDelayUntil is no longer required and should be removed
++    #endif
++#endif
++
++#ifndef INCLUDE_xTaskDelayUntil
++    #ifdef INCLUDE_vTaskDelayUntil
++
++/* If INCLUDE_vTaskDelayUntil is set but INCLUDE_xTaskDelayUntil is not then
++ * the project's FreeRTOSConfig.h probably pre-dates the introduction of
++ * xTaskDelayUntil and setting INCLUDE_xTaskDelayUntil to whatever
++ * INCLUDE_vTaskDelayUntil is set to will ensure backward compatibility.
++ */
++        #define INCLUDE_xTaskDelayUntil    INCLUDE_vTaskDelayUntil
++    #endif
++#endif
++
++#ifndef INCLUDE_xTaskDelayUntil
++    #define INCLUDE_xTaskDelayUntil    0
++#endif
++
++#ifndef INCLUDE_vTaskDelay
++    #define INCLUDE_vTaskDelay    0
++#endif
++
++#ifndef INCLUDE_xTaskGetIdleTaskHandle
++    #define INCLUDE_xTaskGetIdleTaskHandle    0
++#endif
++
++#ifndef INCLUDE_xTaskAbortDelay
++    #define INCLUDE_xTaskAbortDelay    0
++#endif
++
++#ifndef INCLUDE_xQueueGetMutexHolder
++    #define INCLUDE_xQueueGetMutexHolder    0
++#endif
++
++#ifndef INCLUDE_xSemaphoreGetMutexHolder
++    #define INCLUDE_xSemaphoreGetMutexHolder    INCLUDE_xQueueGetMutexHolder
++#endif
++
++#ifndef INCLUDE_xTaskGetHandle
++    #define INCLUDE_xTaskGetHandle    0
++#endif
++
++#ifndef INCLUDE_uxTaskGetStackHighWaterMark
++    #define INCLUDE_uxTaskGetStackHighWaterMark    0
++#endif
++
++#ifndef INCLUDE_uxTaskGetStackHighWaterMark2
++    #define INCLUDE_uxTaskGetStackHighWaterMark2    0
++#endif
++
++#ifndef INCLUDE_eTaskGetState
++    #define INCLUDE_eTaskGetState    0
++#endif
++
++#ifndef INCLUDE_xTaskResumeFromISR
++    #define INCLUDE_xTaskResumeFromISR    1
++#endif
++
++#ifndef INCLUDE_xTimerPendFunctionCall
++    #define INCLUDE_xTimerPendFunctionCall    0
++#endif
++
++#ifndef INCLUDE_xTaskGetSchedulerState
++    #define INCLUDE_xTaskGetSchedulerState    0
++#endif
++
++#ifndef INCLUDE_xTaskGetCurrentTaskHandle
++    #define INCLUDE_xTaskGetCurrentTaskHandle    0
++#endif
++
++#if configUSE_CO_ROUTINES != 0
++    #ifndef configMAX_CO_ROUTINE_PRIORITIES
++        #error configMAX_CO_ROUTINE_PRIORITIES must be greater than or equal to 1.
++    #endif
++#endif
++
++#ifndef configUSE_DAEMON_TASK_STARTUP_HOOK
++    #define configUSE_DAEMON_TASK_STARTUP_HOOK    0
++#endif
++
++#ifndef configUSE_APPLICATION_TASK_TAG
++    #define configUSE_APPLICATION_TASK_TAG    0
++#endif
++
++#ifndef configNUM_THREAD_LOCAL_STORAGE_POINTERS
++    #define configNUM_THREAD_LOCAL_STORAGE_POINTERS    0
++#endif
++
++#ifndef configUSE_RECURSIVE_MUTEXES
++    #define configUSE_RECURSIVE_MUTEXES    0
++#endif
++
++#ifndef configUSE_MUTEXES
++    #define configUSE_MUTEXES    0
++#endif
++
++#ifndef configUSE_TIMERS
++    #define configUSE_TIMERS    0
++#endif
++
++#ifndef configUSE_COUNTING_SEMAPHORES
++    #define configUSE_COUNTING_SEMAPHORES    0
++#endif
++
++#ifndef configUSE_ALTERNATIVE_API
++    #define configUSE_ALTERNATIVE_API    0
++#endif
++
++#ifndef portCRITICAL_NESTING_IN_TCB
++    #define portCRITICAL_NESTING_IN_TCB    0
++#endif
++
++#ifndef configMAX_TASK_NAME_LEN
++    #define configMAX_TASK_NAME_LEN    16
++#endif
++
++#ifndef configIDLE_SHOULD_YIELD
++    #define configIDLE_SHOULD_YIELD    1
++#endif
++
++#if configMAX_TASK_NAME_LEN < 1
++    #error configMAX_TASK_NAME_LEN must be set to a minimum of 1 in FreeRTOSConfig.h
++#endif
++
++#ifndef configASSERT
++    #define configASSERT( x )
++    #define configASSERT_DEFINED    0
++#else
++    #define configASSERT_DEFINED    1
++#endif
++
++/* configPRECONDITION should be defined as configASSERT.
++ * The CBMC proofs need a way to track assumptions and assertions.
++ * A configPRECONDITION statement should express an implicit invariant or
++ * assumption made.  A configASSERT statement should express an invariant that must
++ * hold explicit before calling the code. */
++#ifndef configPRECONDITION
++    #define configPRECONDITION( X )    configASSERT( X )
++    #define configPRECONDITION_DEFINED    0
++#else
++    #define configPRECONDITION_DEFINED    1
++#endif
++
++#ifndef portMEMORY_BARRIER
++    #define portMEMORY_BARRIER()
++#endif
++
++#ifndef portSOFTWARE_BARRIER
++    #define portSOFTWARE_BARRIER()
++#endif
++
++/* The timers module relies on xTaskGetSchedulerState(). */
++#if configUSE_TIMERS == 1
++
++    #ifndef configTIMER_TASK_PRIORITY
++        #error If configUSE_TIMERS is set to 1 then configTIMER_TASK_PRIORITY must also be defined.
++    #endif /* configTIMER_TASK_PRIORITY */
++
++    #ifndef configTIMER_QUEUE_LENGTH
++        #error If configUSE_TIMERS is set to 1 then configTIMER_QUEUE_LENGTH must also be defined.
++    #endif /* configTIMER_QUEUE_LENGTH */
++
++    #ifndef configTIMER_TASK_STACK_DEPTH
++        #error If configUSE_TIMERS is set to 1 then configTIMER_TASK_STACK_DEPTH must also be defined.
++    #endif /* configTIMER_TASK_STACK_DEPTH */
++
++#endif /* configUSE_TIMERS */
++
++#ifndef portSET_INTERRUPT_MASK_FROM_ISR
++    #define portSET_INTERRUPT_MASK_FROM_ISR()    0
++#endif
++
++#ifndef portCLEAR_INTERRUPT_MASK_FROM_ISR
++    #define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedStatusValue )    ( void ) uxSavedStatusValue
++#endif
++
++#ifndef portCLEAN_UP_TCB
++    #define portCLEAN_UP_TCB( pxTCB )    ( void ) pxTCB
++#endif
++
++#ifndef portPRE_TASK_DELETE_HOOK
++    #define portPRE_TASK_DELETE_HOOK( pvTaskToDelete, pxYieldPending )
++#endif
++
++#ifndef portSETUP_TCB
++    #define portSETUP_TCB( pxTCB )    ( void ) pxTCB
++#endif
++
++#ifndef configQUEUE_REGISTRY_SIZE
++    #define configQUEUE_REGISTRY_SIZE    0U
++#endif
++
++#if ( configQUEUE_REGISTRY_SIZE < 1 )
++    #define vQueueAddToRegistry( xQueue, pcName )
++    #define vQueueUnregisterQueue( xQueue )
++    #define pcQueueGetName( xQueue )
++#endif
++
++#ifndef portPOINTER_SIZE_TYPE
++    #define portPOINTER_SIZE_TYPE    uint32_t
++#endif
++
++/* Remove any unused trace macros. */
++#ifndef traceSTART
++
++/* Used to perform any necessary initialisation - for example, open a file
++ * into which trace is to be written. */
++    #define traceSTART()
++#endif
++
++#ifndef traceEND
++
++/* Use to close a trace, for example close a file into which trace has been
++ * written. */
++    #define traceEND()
++#endif
++
++#ifndef traceTASK_SWITCHED_IN
++
++/* Called after a task has been selected to run.  pxCurrentTCB holds a pointer
++ * to the task control block of the selected task. */
++    #define traceTASK_SWITCHED_IN()
++#endif
++
++#ifndef traceINCREASE_TICK_COUNT
++
++/* Called before stepping the tick count after waking from tickless idle
++ * sleep. */
++    #define traceINCREASE_TICK_COUNT( x )
++#endif
++
++#ifndef traceLOW_POWER_IDLE_BEGIN
++    /* Called immediately before entering tickless idle. */
++    #define traceLOW_POWER_IDLE_BEGIN()
++#endif
++
++#ifndef traceLOW_POWER_IDLE_END
++    /* Called when returning to the Idle task after a tickless idle. */
++    #define traceLOW_POWER_IDLE_END()
++#endif
++
++#ifndef traceTASK_SWITCHED_OUT
++
++/* Called before a task has been selected to run.  pxCurrentTCB holds a pointer
++ * to the task control block of the task being switched out. */
++    #define traceTASK_SWITCHED_OUT()
++#endif
++
++#ifndef traceTASK_PRIORITY_INHERIT
++
++/* Called when a task attempts to take a mutex that is already held by a
++ * lower priority task.  pxTCBOfMutexHolder is a pointer to the TCB of the task
++ * that holds the mutex.  uxInheritedPriority is the priority the mutex holder
++ * will inherit (the priority of the task that is attempting to obtain the
++ * mutex). */
++    #define traceTASK_PRIORITY_INHERIT( pxTCBOfMutexHolder, uxInheritedPriority )
++#endif
++
++#ifndef traceTASK_PRIORITY_DISINHERIT
++
++/* Called when a task releases a mutex, the holding of which had resulted in
++ * the task inheriting the priority of a higher priority task.
++ * pxTCBOfMutexHolder is a pointer to the TCB of the task that is releasing the
++ * mutex.  uxOriginalPriority is the task's configured (base) priority. */
++    #define traceTASK_PRIORITY_DISINHERIT( pxTCBOfMutexHolder, uxOriginalPriority )
++#endif
++
++#ifndef traceBLOCKING_ON_QUEUE_RECEIVE
++
++/* Task is about to block because it cannot read from a
++ * queue/mutex/semaphore.  pxQueue is a pointer to the queue/mutex/semaphore
++ * upon which the read was attempted.  pxCurrentTCB points to the TCB of the
++ * task that attempted the read. */
++    #define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue )
++#endif
++
++#ifndef traceBLOCKING_ON_QUEUE_PEEK
++
++/* Task is about to block because it cannot read from a
++ * queue/mutex/semaphore.  pxQueue is a pointer to the queue/mutex/semaphore
++ * upon which the read was attempted.  pxCurrentTCB points to the TCB of the
++ * task that attempted the read. */
++    #define traceBLOCKING_ON_QUEUE_PEEK( pxQueue )
++#endif
++
++#ifndef traceBLOCKING_ON_QUEUE_SEND
++
++/* Task is about to block because it cannot write to a
++ * queue/mutex/semaphore.  pxQueue is a pointer to the queue/mutex/semaphore
++ * upon which the write was attempted.  pxCurrentTCB points to the TCB of the
++ * task that attempted the write. */
++    #define traceBLOCKING_ON_QUEUE_SEND( pxQueue )
++#endif
++
++#ifndef configCHECK_FOR_STACK_OVERFLOW
++    #define configCHECK_FOR_STACK_OVERFLOW    0
++#endif
++
++#ifndef configRECORD_STACK_HIGH_ADDRESS
++    #define configRECORD_STACK_HIGH_ADDRESS    0
++#endif
++
++#ifndef configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H
++    #define configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H    0
++#endif
++
++/* The following event macros are embedded in the kernel API calls. */
++
++#ifndef traceMOVED_TASK_TO_READY_STATE
++    #define traceMOVED_TASK_TO_READY_STATE( pxTCB )
++#endif
++
++#ifndef tracePOST_MOVED_TASK_TO_READY_STATE
++    #define tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB )
++#endif
++
++#ifndef traceQUEUE_CREATE
++    #define traceQUEUE_CREATE( pxNewQueue )
++#endif
++
++#ifndef traceQUEUE_CREATE_FAILED
++    #define traceQUEUE_CREATE_FAILED( ucQueueType )
++#endif
++
++#ifndef traceCREATE_MUTEX
++    #define traceCREATE_MUTEX( pxNewQueue )
++#endif
++
++#ifndef traceCREATE_MUTEX_FAILED
++    #define traceCREATE_MUTEX_FAILED()
++#endif
++
++#ifndef traceGIVE_MUTEX_RECURSIVE
++    #define traceGIVE_MUTEX_RECURSIVE( pxMutex )
++#endif
++
++#ifndef traceGIVE_MUTEX_RECURSIVE_FAILED
++    #define traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex )
++#endif
++
++#ifndef traceTAKE_MUTEX_RECURSIVE
++    #define traceTAKE_MUTEX_RECURSIVE( pxMutex )
++#endif
++
++#ifndef traceTAKE_MUTEX_RECURSIVE_FAILED
++    #define traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex )
++#endif
++
++#ifndef traceCREATE_COUNTING_SEMAPHORE
++    #define traceCREATE_COUNTING_SEMAPHORE()
++#endif
++
++#ifndef traceCREATE_COUNTING_SEMAPHORE_FAILED
++    #define traceCREATE_COUNTING_SEMAPHORE_FAILED()
++#endif
++
++#ifndef traceQUEUE_SET_SEND
++    #define traceQUEUE_SET_SEND    traceQUEUE_SEND
++#endif
++
++#ifndef traceQUEUE_SEND
++    #define traceQUEUE_SEND( pxQueue )
++#endif
++
++#ifndef traceQUEUE_SEND_FAILED
++    #define traceQUEUE_SEND_FAILED( pxQueue )
++#endif
++
++#ifndef traceQUEUE_RECEIVE
++    #define traceQUEUE_RECEIVE( pxQueue )
++#endif
++
++#ifndef traceQUEUE_PEEK
++    #define traceQUEUE_PEEK( pxQueue )
++#endif
++
++#ifndef traceQUEUE_PEEK_FAILED
++    #define traceQUEUE_PEEK_FAILED( pxQueue )
++#endif
++
++#ifndef traceQUEUE_PEEK_FROM_ISR
++    #define traceQUEUE_PEEK_FROM_ISR( pxQueue )
++#endif
++
++#ifndef traceQUEUE_RECEIVE_FAILED
++    #define traceQUEUE_RECEIVE_FAILED( pxQueue )
++#endif
++
++#ifndef traceQUEUE_SEND_FROM_ISR
++    #define traceQUEUE_SEND_FROM_ISR( pxQueue )
++#endif
++
++#ifndef traceQUEUE_SEND_FROM_ISR_FAILED
++    #define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue )
++#endif
++
++#ifndef traceQUEUE_RECEIVE_FROM_ISR
++    #define traceQUEUE_RECEIVE_FROM_ISR( pxQueue )
++#endif
++
++#ifndef traceQUEUE_RECEIVE_FROM_ISR_FAILED
++    #define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue )
++#endif
++
++#ifndef traceQUEUE_PEEK_FROM_ISR_FAILED
++    #define traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue )
++#endif
++
++#ifndef traceQUEUE_DELETE
++    #define traceQUEUE_DELETE( pxQueue )
++#endif
++
++#ifndef traceTASK_CREATE
++    #define traceTASK_CREATE( pxNewTCB )
++#endif
++
++#ifndef traceTASK_CREATE_FAILED
++    #define traceTASK_CREATE_FAILED()
++#endif
++
++#ifndef traceTASK_DELETE
++    #define traceTASK_DELETE( pxTaskToDelete )
++#endif
++
++#ifndef traceTASK_DELAY_UNTIL
++    #define traceTASK_DELAY_UNTIL( x )
++#endif
++
++#ifndef traceTASK_DELAY
++    #define traceTASK_DELAY()
++#endif
++
++#ifndef traceTASK_PRIORITY_SET
++    #define traceTASK_PRIORITY_SET( pxTask, uxNewPriority )
++#endif
++
++#ifndef traceTASK_SUSPEND
++    #define traceTASK_SUSPEND( pxTaskToSuspend )
++#endif
++
++#ifndef traceTASK_RESUME
++    #define traceTASK_RESUME( pxTaskToResume )
++#endif
++
++#ifndef traceTASK_RESUME_FROM_ISR
++    #define traceTASK_RESUME_FROM_ISR( pxTaskToResume )
++#endif
++
++#ifndef traceTASK_INCREMENT_TICK
++    #define traceTASK_INCREMENT_TICK( xTickCount )
++#endif
++
++#ifndef traceTIMER_CREATE
++    #define traceTIMER_CREATE( pxNewTimer )
++#endif
++
++#ifndef traceTIMER_CREATE_FAILED
++    #define traceTIMER_CREATE_FAILED()
++#endif
++
++#ifndef traceTIMER_COMMAND_SEND
++    #define traceTIMER_COMMAND_SEND( xTimer, xMessageID, xMessageValueValue, xReturn )
++#endif
++
++#ifndef traceTIMER_EXPIRED
++    #define traceTIMER_EXPIRED( pxTimer )
++#endif
++
++#ifndef traceTIMER_COMMAND_RECEIVED
++    #define traceTIMER_COMMAND_RECEIVED( pxTimer, xMessageID, xMessageValue )
++#endif
++
++#ifndef traceMALLOC
++    #define traceMALLOC( pvAddress, uiSize )
++#endif
++
++#ifndef traceFREE
++    #define traceFREE( pvAddress, uiSize )
++#endif
++
++#ifndef traceEVENT_GROUP_CREATE
++    #define traceEVENT_GROUP_CREATE( xEventGroup )
++#endif
++
++#ifndef traceEVENT_GROUP_CREATE_FAILED
++    #define traceEVENT_GROUP_CREATE_FAILED()
++#endif
++
++#ifndef traceEVENT_GROUP_SYNC_BLOCK
++    #define traceEVENT_GROUP_SYNC_BLOCK( xEventGroup, uxBitsToSet, uxBitsToWaitFor )
++#endif
++
++#ifndef traceEVENT_GROUP_SYNC_END
++    #define traceEVENT_GROUP_SYNC_END( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTimeoutOccurred )    ( void ) xTimeoutOccurred
++#endif
++
++#ifndef traceEVENT_GROUP_WAIT_BITS_BLOCK
++    #define traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor )
++#endif
++
++#ifndef traceEVENT_GROUP_WAIT_BITS_END
++    #define traceEVENT_GROUP_WAIT_BITS_END( xEventGroup, uxBitsToWaitFor, xTimeoutOccurred )    ( void ) xTimeoutOccurred
++#endif
++
++#ifndef traceEVENT_GROUP_CLEAR_BITS
++    #define traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear )
++#endif
++
++#ifndef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR
++    #define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR( xEventGroup, uxBitsToClear )
++#endif
++
++#ifndef traceEVENT_GROUP_SET_BITS
++    #define traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet )
++#endif
++
++#ifndef traceEVENT_GROUP_SET_BITS_FROM_ISR
++    #define traceEVENT_GROUP_SET_BITS_FROM_ISR( xEventGroup, uxBitsToSet )
++#endif
++
++#ifndef traceEVENT_GROUP_DELETE
++    #define traceEVENT_GROUP_DELETE( xEventGroup )
++#endif
++
++#ifndef tracePEND_FUNC_CALL
++    #define tracePEND_FUNC_CALL( xFunctionToPend, pvParameter1, ulParameter2, ret )
++#endif
++
++#ifndef tracePEND_FUNC_CALL_FROM_ISR
++    #define tracePEND_FUNC_CALL_FROM_ISR( xFunctionToPend, pvParameter1, ulParameter2, ret )
++#endif
++
++#ifndef traceQUEUE_REGISTRY_ADD
++    #define traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName )
++#endif
++
++#ifndef traceTASK_NOTIFY_TAKE_BLOCK
++    #define traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWait )
++#endif
++
++#ifndef traceTASK_NOTIFY_TAKE
++    #define traceTASK_NOTIFY_TAKE( uxIndexToWait )
++#endif
++
++#ifndef traceTASK_NOTIFY_WAIT_BLOCK
++    #define traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWait )
++#endif
++
++#ifndef traceTASK_NOTIFY_WAIT
++    #define traceTASK_NOTIFY_WAIT( uxIndexToWait )
++#endif
++
++#ifndef traceTASK_NOTIFY
++    #define traceTASK_NOTIFY( uxIndexToNotify )
++#endif
++
++#ifndef traceTASK_NOTIFY_FROM_ISR
++    #define traceTASK_NOTIFY_FROM_ISR( uxIndexToNotify )
++#endif
++
++#ifndef traceTASK_NOTIFY_GIVE_FROM_ISR
++    #define traceTASK_NOTIFY_GIVE_FROM_ISR( uxIndexToNotify )
++#endif
++
++#ifndef traceSTREAM_BUFFER_CREATE_FAILED
++    #define traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer )
++#endif
++
++#ifndef traceSTREAM_BUFFER_CREATE_STATIC_FAILED
++    #define traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer )
++#endif
++
++#ifndef traceSTREAM_BUFFER_CREATE
++    #define traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer )
++#endif
++
++#ifndef traceSTREAM_BUFFER_DELETE
++    #define traceSTREAM_BUFFER_DELETE( xStreamBuffer )
++#endif
++
++#ifndef traceSTREAM_BUFFER_RESET
++    #define traceSTREAM_BUFFER_RESET( xStreamBuffer )
++#endif
++
++#ifndef traceBLOCKING_ON_STREAM_BUFFER_SEND
++    #define traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer )
++#endif
++
++#ifndef traceSTREAM_BUFFER_SEND
++    #define traceSTREAM_BUFFER_SEND( xStreamBuffer, xBytesSent )
++#endif
++
++#ifndef traceSTREAM_BUFFER_SEND_FAILED
++    #define traceSTREAM_BUFFER_SEND_FAILED( xStreamBuffer )
++#endif
++
++#ifndef traceSTREAM_BUFFER_SEND_FROM_ISR
++    #define traceSTREAM_BUFFER_SEND_FROM_ISR( xStreamBuffer, xBytesSent )
++#endif
++
++#ifndef traceBLOCKING_ON_STREAM_BUFFER_RECEIVE
++    #define traceBLOCKING_ON_STREAM_BUFFER_RECEIVE( xStreamBuffer )
++#endif
++
++#ifndef traceSTREAM_BUFFER_RECEIVE
++    #define traceSTREAM_BUFFER_RECEIVE( xStreamBuffer, xReceivedLength )
++#endif
++
++#ifndef traceSTREAM_BUFFER_RECEIVE_FAILED
++    #define traceSTREAM_BUFFER_RECEIVE_FAILED( xStreamBuffer )
++#endif
++
++#ifndef traceSTREAM_BUFFER_RECEIVE_FROM_ISR
++    #define traceSTREAM_BUFFER_RECEIVE_FROM_ISR( xStreamBuffer, xReceivedLength )
++#endif
++
++#ifdef ESP_PLATFORM
++#ifndef traceISR_EXIT_TO_SCHEDULER
++    #define traceISR_EXIT_TO_SCHEDULER()
++#endif
++
++#ifndef traceISR_EXIT
++    #define traceISR_EXIT()
++#endif
++
++#ifndef traceISR_ENTER
++    #define traceISR_ENTER(_n_)
++#endif
++#endif // ESP_PLATFORM
++
++#ifndef configGENERATE_RUN_TIME_STATS
++    #define configGENERATE_RUN_TIME_STATS    0
++#endif
++
++#if ( configGENERATE_RUN_TIME_STATS == 1 )
++
++    #ifndef portCONFIGURE_TIMER_FOR_RUN_TIME_STATS
++        #error If configGENERATE_RUN_TIME_STATS is defined then portCONFIGURE_TIMER_FOR_RUN_TIME_STATS must also be defined.  portCONFIGURE_TIMER_FOR_RUN_TIME_STATS should call a port layer function to setup a peripheral timer/counter that can then be used as the run time counter time base.
++    #endif /* portCONFIGURE_TIMER_FOR_RUN_TIME_STATS */
++
++    #ifndef portGET_RUN_TIME_COUNTER_VALUE
++        #ifndef portALT_GET_RUN_TIME_COUNTER_VALUE
++            #error If configGENERATE_RUN_TIME_STATS is defined then either portGET_RUN_TIME_COUNTER_VALUE or portALT_GET_RUN_TIME_COUNTER_VALUE must also be defined.  See the examples provided and the FreeRTOS web site for more information.
++        #endif /* portALT_GET_RUN_TIME_COUNTER_VALUE */
++    #endif /* portGET_RUN_TIME_COUNTER_VALUE */
++
++#endif /* configGENERATE_RUN_TIME_STATS */
++
++#ifndef portCONFIGURE_TIMER_FOR_RUN_TIME_STATS
++    #define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS()
++#endif
++
++#ifndef configUSE_MALLOC_FAILED_HOOK
++    #define configUSE_MALLOC_FAILED_HOOK    0
++#endif
++
++#ifndef portPRIVILEGE_BIT
++    #define portPRIVILEGE_BIT    ( ( UBaseType_t ) 0x00 )
++#endif
++
++#ifndef portYIELD_WITHIN_API
++    #define portYIELD_WITHIN_API    portYIELD
++#endif
++
++#ifndef portSUPPRESS_TICKS_AND_SLEEP
++    #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime )
++#endif
++
++#ifndef configEXPECTED_IDLE_TIME_BEFORE_SLEEP
++    #define configEXPECTED_IDLE_TIME_BEFORE_SLEEP    2
++#endif
++
++#if configEXPECTED_IDLE_TIME_BEFORE_SLEEP < 2
++    #error configEXPECTED_IDLE_TIME_BEFORE_SLEEP must not be less than 2
++#endif
++
++#ifndef configUSE_TICKLESS_IDLE
++    #define configUSE_TICKLESS_IDLE    0
++#endif
++
++#ifndef configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING
++    #define configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( x )
++#endif
++
++#ifndef configPRE_SLEEP_PROCESSING
++    #define configPRE_SLEEP_PROCESSING( x )
++#endif
++
++#ifndef configPOST_SLEEP_PROCESSING
++    #define configPOST_SLEEP_PROCESSING( x )
++#endif
++
++#ifndef configUSE_QUEUE_SETS
++    #define configUSE_QUEUE_SETS    0
++#endif
++
++#ifndef portTASK_USES_FLOATING_POINT
++    #define portTASK_USES_FLOATING_POINT()
++#endif
++
++#ifndef portALLOCATE_SECURE_CONTEXT
++    #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize )
++#endif
++
++#ifndef portDONT_DISCARD
++    #define portDONT_DISCARD
++#endif
++
++#ifndef configUSE_TIME_SLICING
++    #define configUSE_TIME_SLICING    1
++#endif
++
++#ifndef configINCLUDE_APPLICATION_DEFINED_PRIVILEGED_FUNCTIONS
++    #define configINCLUDE_APPLICATION_DEFINED_PRIVILEGED_FUNCTIONS    0
++#endif
++
++#ifndef configUSE_STATS_FORMATTING_FUNCTIONS
++    #define configUSE_STATS_FORMATTING_FUNCTIONS    0
++#endif
++
++#ifndef portASSERT_IF_INTERRUPT_PRIORITY_INVALID
++    #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID()
++#endif
++
++#ifndef configUSE_TRACE_FACILITY
++    #define configUSE_TRACE_FACILITY    0
++#endif
++
++#ifndef mtCOVERAGE_TEST_MARKER
++    #define mtCOVERAGE_TEST_MARKER()
++#endif
++
++#ifndef mtCOVERAGE_TEST_DELAY
++    #define mtCOVERAGE_TEST_DELAY()
++#endif
++
++#ifndef portASSERT_IF_IN_ISR
++    #define portASSERT_IF_IN_ISR()
++#endif
++
++#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION
++    #define configUSE_PORT_OPTIMISED_TASK_SELECTION    0
++#endif
++
++#ifndef configAPPLICATION_ALLOCATED_HEAP
++    #define configAPPLICATION_ALLOCATED_HEAP    0
++#endif
++
++#ifndef configUSE_TASK_NOTIFICATIONS
++    #define configUSE_TASK_NOTIFICATIONS    1
++#endif
++
++#ifndef configTASK_NOTIFICATION_ARRAY_ENTRIES
++    #define configTASK_NOTIFICATION_ARRAY_ENTRIES    1
++#endif
++
++#if configTASK_NOTIFICATION_ARRAY_ENTRIES < 1
++    #error configTASK_NOTIFICATION_ARRAY_ENTRIES must be at least 1
++#endif
++
++#ifndef configUSE_POSIX_ERRNO
++    #define configUSE_POSIX_ERRNO    0
++#endif
++
++#ifndef portTICK_TYPE_IS_ATOMIC
++    #define portTICK_TYPE_IS_ATOMIC    0
++#endif
++
++#ifndef configSUPPORT_STATIC_ALLOCATION
++    /* Defaults to 0 for backward compatibility. */
++    #define configSUPPORT_STATIC_ALLOCATION    0
++#endif
++
++#ifndef configSUPPORT_DYNAMIC_ALLOCATION
++    /* Defaults to 1 for backward compatibility. */
++    #define configSUPPORT_DYNAMIC_ALLOCATION    1
++#endif
++
++#ifndef configSTACK_DEPTH_TYPE
++
++/* Defaults to uint16_t for backward compatibility, but can be overridden
++ * in FreeRTOSConfig.h if uint16_t is too restrictive. */
++    #define configSTACK_DEPTH_TYPE    uint16_t
++#endif
++
++#ifndef configRUN_TIME_COUNTER_TYPE
++
++/* Defaults to uint32_t for backward compatibility, but can be overridden in
++ * FreeRTOSConfig.h if uint32_t is too restrictive. */
++
++    #define configRUN_TIME_COUNTER_TYPE    uint32_t
++#endif
++
++#ifndef configMESSAGE_BUFFER_LENGTH_TYPE
++
++/* Defaults to size_t for backward compatibility, but can be overridden
++ * in FreeRTOSConfig.h if lengths will always be less than the number of bytes
++ * in a size_t. */
++    #define configMESSAGE_BUFFER_LENGTH_TYPE    size_t
++#endif
++
++/* Sanity check the configuration. */
++#if ( configUSE_TICKLESS_IDLE != 0 )
++    #if ( INCLUDE_vTaskSuspend != 1 )
++        #error INCLUDE_vTaskSuspend must be set to 1 if configUSE_TICKLESS_IDLE is not set to 0
++    #endif /* INCLUDE_vTaskSuspend */
++#endif /* configUSE_TICKLESS_IDLE */
++
++#if ( ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 0 ) )
++    #error configSUPPORT_STATIC_ALLOCATION and configSUPPORT_DYNAMIC_ALLOCATION cannot both be 0, but can both be 1.
++#endif
++
++#if ( ( configUSE_RECURSIVE_MUTEXES == 1 ) && ( configUSE_MUTEXES != 1 ) )
++    #error configUSE_MUTEXES must be set to 1 to use recursive mutexes
++#endif
++
++#ifndef configINITIAL_TICK_COUNT
++    #define configINITIAL_TICK_COUNT    0
++#endif
++
++#if ( portTICK_TYPE_IS_ATOMIC == 0 )
++
++/* Either variables of tick type cannot be read atomically, or
++ * portTICK_TYPE_IS_ATOMIC was not set - map the critical sections used when
++ * the tick count is returned to the standard critical section macros. */
++    #define portTICK_TYPE_ENTER_CRITICAL()                      portENTER_CRITICAL()
++    #define portTICK_TYPE_EXIT_CRITICAL()                       portEXIT_CRITICAL()
++    #define portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR()         portSET_INTERRUPT_MASK_FROM_ISR()
++    #define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( x )    portCLEAR_INTERRUPT_MASK_FROM_ISR( ( x ) )
++#else
++
++/* The tick type can be read atomically, so critical sections used when the
++ * tick count is returned can be defined away. */
++    #define portTICK_TYPE_ENTER_CRITICAL()
++    #define portTICK_TYPE_EXIT_CRITICAL()
++    #define portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR()         0
++    #define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( x )    ( void ) x
++#endif /* if ( portTICK_TYPE_IS_ATOMIC == 0 ) */
++
++/* Definitions to allow backward compatibility with FreeRTOS versions prior to
++ * V8 if desired. */
++#ifndef configENABLE_BACKWARD_COMPATIBILITY
++    #define configENABLE_BACKWARD_COMPATIBILITY    1
++#endif
++
++#ifndef configPRINTF
++
++/* configPRINTF() was not defined, so define it away to nothing.  To use
++ * configPRINTF() then define it as follows (where MyPrintFunction() is
++ * provided by the application writer):
++ *
++ * void MyPrintFunction(const char *pcFormat, ... );
++ #define configPRINTF( X )   MyPrintFunction X
++ *
++ * Then call like a standard printf() function, but placing brackets around
++ * all parameters so they are passed as a single parameter.  For example:
++ * configPRINTF( ("Value = %d", MyVariable) ); */
++    #define configPRINTF( X )
++#endif
++
++#ifndef configMAX
++
++/* The application writer has not provided their own MAX macro, so define
++ * the following generic implementation. */
++    #define configMAX( a, b )    ( ( ( a ) > ( b ) ) ? ( a ) : ( b ) )
++#endif
++
++#ifndef configMIN
++
++/* The application writer has not provided their own MIN macro, so define
++ * the following generic implementation. */
++    #define configMIN( a, b )    ( ( ( a ) < ( b ) ) ? ( a ) : ( b ) )
++#endif
++
++#if configENABLE_BACKWARD_COMPATIBILITY == 1
++    #define eTaskStateGet                 eTaskGetState
++    #define portTickType                  TickType_t
++    #define xTaskHandle                   TaskHandle_t
++    #define xQueueHandle                  QueueHandle_t
++    #define xSemaphoreHandle              SemaphoreHandle_t
++    #define xQueueSetHandle               QueueSetHandle_t
++    #define xQueueSetMemberHandle         QueueSetMemberHandle_t
++    #define xTimeOutType                  TimeOut_t
++    #define xMemoryRegion                 MemoryRegion_t
++    #define xTaskParameters               TaskParameters_t
++    #define xTaskStatusType               TaskStatus_t
++    #define xTimerHandle                  TimerHandle_t
++    #define xCoRoutineHandle              CoRoutineHandle_t
++    #define pdTASK_HOOK_CODE              TaskHookFunction_t
++    #define portTICK_RATE_MS              portTICK_PERIOD_MS
++    #define pcTaskGetTaskName             pcTaskGetName
++    #define pcTimerGetTimerName           pcTimerGetName
++    #define pcQueueGetQueueName           pcQueueGetName
++    #define vTaskGetTaskInfo              vTaskGetInfo
++    #define xTaskGetIdleRunTimeCounter    ulTaskGetIdleRunTimeCounter
++
++/* Backward compatibility within the scheduler code only - these definitions
++ * are not really required but are included for completeness. */
++    #define tmrTIMER_CALLBACK             TimerCallbackFunction_t
++    #define pdTASK_CODE                   TaskFunction_t
++    #define xListItem                     ListItem_t
++    #define xList                         List_t
++
++/* For libraries that break the list data hiding, and access list structure
++ * members directly (which is not supposed to be done). */
++    #define pxContainer                   pvContainer
++#endif /* configENABLE_BACKWARD_COMPATIBILITY */
++
++#if ( configUSE_ALTERNATIVE_API != 0 )
++    #error The alternative API was deprecated some time ago, and was removed in FreeRTOS V9.0.0
++#endif
++
++/* Set configUSE_TASK_FPU_SUPPORT to 0 to omit floating point support even
++ * if floating point hardware is otherwise supported by the FreeRTOS port in use.
++ * This constant is not supported by all FreeRTOS ports that include floating
++ * point support. */
++#ifndef configUSE_TASK_FPU_SUPPORT
++    #define configUSE_TASK_FPU_SUPPORT    1
++#endif
++
++/* Set configENABLE_MPU to 1 to enable MPU support and 0 to disable it. This is
++ * currently used in ARMv8M ports. */
++#ifndef configENABLE_MPU
++    #define configENABLE_MPU    0
++#endif
++
++/* Set configENABLE_FPU to 1 to enable FPU support and 0 to disable it. This is
++ * currently used in ARMv8M ports. */
++#ifndef configENABLE_FPU
++    #define configENABLE_FPU    1
++#endif
++
++/* Set configENABLE_TRUSTZONE to 1 enable TrustZone support and 0 to disable it.
++ * This is currently used in ARMv8M ports. */
++#ifndef configENABLE_TRUSTZONE
++    #define configENABLE_TRUSTZONE    1
++#endif
++
++/* Set configRUN_FREERTOS_SECURE_ONLY to 1 to run the FreeRTOS ARMv8M port on
++ * the Secure Side only. */
++#ifndef configRUN_FREERTOS_SECURE_ONLY
++    #define configRUN_FREERTOS_SECURE_ONLY    0
++#endif
++
++#ifndef configRUN_ADDITIONAL_TESTS
++    #define configRUN_ADDITIONAL_TESTS    0
++#endif
++
++
++/* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using
++ * dynamically allocated RAM, in which case when any task is deleted it is known
++ * that both the task's stack and TCB need to be freed.  Sometimes the
++ * FreeRTOSConfig.h settings only allow a task to be created using statically
++ * allocated RAM, in which case when any task is deleted it is known that neither
++ * the task's stack or TCB should be freed.  Sometimes the FreeRTOSConfig.h
++ * settings allow a task to be created using either statically or dynamically
++ * allocated RAM, in which case a member of the TCB is used to record whether the
++ * stack and/or TCB were allocated statically or dynamically, so when a task is
++ * deleted the RAM that was allocated dynamically is freed again and no attempt is
++ * made to free the RAM that was allocated statically.
++ * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is only true if it is possible for a
++ * task to be created using either statically or dynamically allocated RAM.  Note
++ * that if portUSING_MPU_WRAPPERS is 1 then a protected task can be created with
++ * a statically allocated stack and a dynamically allocated TCB.
++ *
++ * The following table lists various combinations of portUSING_MPU_WRAPPERS,
++ * configSUPPORT_DYNAMIC_ALLOCATION and configSUPPORT_STATIC_ALLOCATION and
++ * when it is possible to have both static and dynamic allocation:
++ * +-----+---------+--------+-----------------------------+-----------------------------------+------------------+-----------+
++ * | MPU | Dynamic | Static |     Available Functions     |       Possible Allocations        | Both Dynamic and | Need Free |
++ * |     |         |        |                             |                                   | Static Possible  |           |
++ * +-----+---------+--------+-----------------------------+-----------------------------------+------------------+-----------+
++ * | 0   | 0       | 1      | xTaskCreateStatic           | TCB - Static, Stack - Static      | No               | No        |
++ * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------|
++ * | 0   | 1       | 0      | xTaskCreate                 | TCB - Dynamic, Stack - Dynamic    | No               | Yes       |
++ * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------|
++ * | 0   | 1       | 1      | xTaskCreate,                | 1. TCB - Dynamic, Stack - Dynamic | Yes              | Yes       |
++ * |     |         |        | xTaskCreateStatic           | 2. TCB - Static, Stack - Static   |                  |           |
++ * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------|
++ * | 1   | 0       | 1      | xTaskCreateStatic,          | TCB - Static, Stack - Static      | No               | No        |
++ * |     |         |        | xTaskCreateRestrictedStatic |                                   |                  |           |
++ * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------|
++ * | 1   | 1       | 0      | xTaskCreate,                | 1. TCB - Dynamic, Stack - Dynamic | Yes              | Yes       |
++ * |     |         |        | xTaskCreateRestricted       | 2. TCB - Dynamic, Stack - Static  |                  |           |
++ * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------|
++ * | 1   | 1       | 1      | xTaskCreate,                | 1. TCB - Dynamic, Stack - Dynamic | Yes              | Yes       |
++ * |     |         |        | xTaskCreateStatic,          | 2. TCB - Dynamic, Stack - Static  |                  |           |
++ * |     |         |        | xTaskCreateRestricted,      | 3. TCB - Static, Stack - Static   |                  |           |
++ * |     |         |        | xTaskCreateRestrictedStatic |                                   |                  |           |
++ * +-----+---------+--------+-----------------------------+-----------------------------------+------------------+-----------+
++ */
++#define tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE                                                                                     \
++    ( ( ( portUSING_MPU_WRAPPERS == 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) || \
++      ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) )
++
++/*
++ * In line with software engineering best practice, especially when supplying a
++ * library that is likely to change in future versions, FreeRTOS implements a
++ * strict data hiding policy.  This means the Task structure used internally by
++ * FreeRTOS is not accessible to application code.  However, if the application
++ * writer wants to statically allocate the memory required to create a task then
++ * the size of the task object needs to be known.  The StaticTask_t structure
++ * below is provided for this purpose.  Its sizes and alignment requirements are
++ * guaranteed to match those of the genuine structure, no matter which
++ * architecture is being used, and no matter how the values in FreeRTOSConfig.h
++ * are set.  Its contents are somewhat obfuscated in the hope users will
++ * recognise that it would be unwise to make direct use of the structure members.
++ */
++typedef struct xSTATIC_TCB
++{
++    struct rt_thread thread;
++    #if ( configUSE_APPLICATION_TASK_TAG == 1 )
++        void * pxTaskTag;
++    #endif
++    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
++        uint32_t ulNotifiedValue[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];
++        uint8_t ucNotifyState[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];
++    #endif
++    #if ( INCLUDE_xTaskAbortDelay == 1 )
++        uint8_t ucDelayAborted;
++    #endif
++} StaticTask_t;
++
++typedef struct
++{
++    struct rt_ipc_object *rt_ipc;
++    struct rt_messagequeue ipc_obj;
++} StaticQueue_t;
++
++typedef struct
++{
++    struct rt_ipc_object *rt_ipc;
++    union
++    {
++        struct rt_semaphore_wrapper semaphore;
++        struct rt_mutex mutex;
++    } ipc_obj;
++} StaticSemaphore_t;
++
++typedef struct xSTATIC_EVENT_GROUP
++{
++    struct rt_event event;
++} StaticEventGroup_t;
++
++typedef struct xSTATIC_TIMER
++{
++    struct rt_timer timer;
++    void * pvTimerID;
++} StaticTimer_t;
++
++/* *INDENT-OFF* */
++#ifdef __cplusplus
++    }
++#endif
++/* *INDENT-ON* */
++
++#endif /* INC_FREERTOS_H */
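Note on the wrapper types above: the Static*_t structures embed RT-Thread control blocks (struct rt_thread, struct rt_messagequeue, struct rt_event, ...) rather than the genuine FreeRTOS internals, but application code uses them exactly as in stock FreeRTOS. A minimal usage sketch follows (illustrative only, not part of the patch; names and stack size are made up, and it assumes configSUPPORT_STATIC_ALLOCATION is enabled in the project's FreeRTOSConfig.h):

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    #define BLINK_STACK_WORDS    512                     /* illustrative stack depth */

    static StaticTask_t xTaskBuffer;                     /* wraps a struct rt_thread internally */
    static StackType_t  uxStack[ BLINK_STACK_WORDS ];

    static void prvBlinkTask( void *pvParameters )
    {
        for( ;; )
        {
            vTaskDelay( pdMS_TO_TICKS( 500 ) );
        }
    }

    void app_create_blink( void )
    {
        /* Standard FreeRTOS call; the wrapper is expected to initialise the
         * embedded rt_thread instead of a native FreeRTOS TCB. */
        xTaskCreateStatic( prvBlinkTask, "blink", BLINK_STACK_WORDS,
                           NULL, 5, uxStack, &xTaskBuffer );
    }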
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/event_groups.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/event_groups.h
+new file mode 100644
+index 0000000000..591814d37f
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/event_groups.h
+@@ -0,0 +1,621 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++#ifndef EVENT_GROUPS_H
++#define EVENT_GROUPS_H
++
++#ifndef INC_FREERTOS_H
++    #error "include FreeRTOS.h" must appear in source files before "include event_groups.h"
++#endif
++
++/* *INDENT-OFF* */
++#ifdef __cplusplus
++    extern "C" {
++#endif
++/* *INDENT-ON* */
++
++/**
++ * An event group is a collection of bits to which an application can assign a
++ * meaning.  For example, an application may create an event group to convey
++ * the status of various CAN bus related events in which bit 0 might mean "A CAN
++ * message has been received and is ready for processing", bit 1 might mean "The
++ * application has queued a message that is ready for sending onto the CAN
++ * network", and bit 2 might mean "It is time to send a SYNC message onto the
++ * CAN network" etc.  A task can then test the bit values to see which events
++ * are active, and optionally enter the Blocked state to wait for a specified
++ * bit or a group of specified bits to be active.  To continue the CAN bus
++ * example, a CAN controlling task can enter the Blocked state (and therefore
++ * not consume any processing time) until either bit 0, bit 1 or bit 2 are
++ * active, at which time the bit that was actually active would inform the task
++ * which action it had to take (process a received message, send a message, or
++ * send a SYNC).
++ *
++ * The event groups implementation contains intelligence to avoid race
++ * conditions that would otherwise occur were an application to use a simple
++ * variable for the same purpose.  This is particularly important with respect
++ * to when a bit within an event group is to be cleared, and when bits have to
++ * be set and then tested atomically - as is the case where event groups are
++ * used to create a synchronisation point between multiple tasks (a
++ * 'rendezvous').
++ *
++ * \defgroup EventGroup
++ */
++
++
++
++/**
++ * event_groups.h
++ *
++ * Type by which event groups are referenced.  For example, a call to
++ * xEventGroupCreate() returns an EventGroupHandle_t variable that can then
++ * be used as a parameter to other event group functions.
++ *
++ * \defgroup EventGroupHandle_t EventGroupHandle_t
++ * \ingroup EventGroup
++ */
++struct EventGroupDef_t;
++typedef struct EventGroupDef_t   * EventGroupHandle_t;
++
++/*
++ * The type that holds event bits always matches TickType_t - therefore the
++ * number of bits it holds is set by configUSE_16_BIT_TICKS (16 bits if set to 1,
++ * 32 bits if set to 0.
++ *
++ * \defgroup EventBits_t EventBits_t
++ * \ingroup EventGroup
++ */
++typedef TickType_t               EventBits_t;
++
++/**
++ * event_groups.h
++ * @code{c}
++ * EventGroupHandle_t xEventGroupCreate( void );
++ * @endcode
++ *
++ * Create a new event group.
++ *
++ * Internally, within the FreeRTOS implementation, event groups use a [small]
++ * block of memory, in which the event group's structure is stored.  If an event
++ * group is created using xEventGroupCreate() then the required memory is
++ * automatically dynamically allocated inside the xEventGroupCreate() function.
++ * (see https://www.FreeRTOS.org/a00111.html).  If an event group is created
++ * using xEventGroupCreateStatic() then the application writer must instead
++ * provide the memory that will get used by the event group.
++ * xEventGroupCreateStatic() therefore allows an event group to be created
++ * without using any dynamic memory allocation.
++ *
++ * Although event groups are not related to ticks, for internal implementation
++ * reasons the number of bits available for use in an event group is dependent
++ * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h.  If
++ * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit
++ * 0 to bit 7).  If configUSE_16_BIT_TICKS is set to 0 then each event group has
++ * 24 usable bits (bit 0 to bit 23).  The EventBits_t type is used to store
++ * event bits within an event group.
++ *
++ * @return If the event group was created then a handle to the event group is
++ * returned.  If there was insufficient FreeRTOS heap available to create the
++ * event group then NULL is returned.  See https://www.FreeRTOS.org/a00111.html
++ *
++ * Example usage:
++ * @code{c}
++ *  // Declare a variable to hold the created event group.
++ *  EventGroupHandle_t xCreatedEventGroup;
++ *
++ *  // Attempt to create the event group.
++ *  xCreatedEventGroup = xEventGroupCreate();
++ *
++ *  // Was the event group created successfully?
++ *  if( xCreatedEventGroup == NULL )
++ *  {
++ *      // The event group was not created because there was insufficient
++ *      // FreeRTOS heap available.
++ *  }
++ *  else
++ *  {
++ *      // The event group was created.
++ *  }
++ * @endcode
++ * \defgroup xEventGroupCreate xEventGroupCreate
++ * \ingroup EventGroup
++ */
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++    EventGroupHandle_t xEventGroupCreate( void );
++#endif
++
++/**
++ * event_groups.h
++ * @code{c}
++ * EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer );
++ * @endcode
++ *
++ * Create a new event group.
++ *
++ * Internally, within the FreeRTOS implementation, event groups use a [small]
++ * block of memory, in which the event group's structure is stored.  If an event
++ * group is created using xEventGroupCreate() then the required memory is
++ * automatically dynamically allocated inside the xEventGroupCreate() function.
++ * (see https://www.FreeRTOS.org/a00111.html).  If an event group is created
++ * using xEventGroupCreateStatic() then the application writer must instead
++ * provide the memory that will get used by the event group.
++ * xEventGroupCreateStatic() therefore allows an event group to be created
++ * without using any dynamic memory allocation.
++ *
++ * Although event groups are not related to ticks, for internal implementation
++ * reasons the number of bits available for use in an event group is dependent
++ * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h.  If
++ * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit
++ * 0 to bit 7).  If configUSE_16_BIT_TICKS is set to 0 then each event group has
++ * 24 usable bits (bit 0 to bit 23).  The EventBits_t type is used to store
++ * event bits within an event group.
++ *
++ * @param pxEventGroupBuffer pxEventGroupBuffer must point to a variable of type
++ * StaticEventGroup_t, which will then be used to hold the event group's data
++ * structures, removing the need for the memory to be allocated dynamically.
++ *
++ * @return If the event group was created then a handle to the event group is
++ * returned.  If pxEventGroupBuffer was NULL then NULL is returned.
++ *
++ * Example usage:
++ * @code{c}
++ *  // StaticEventGroup_t is a publicly accessible structure that has the same
++ *  // size and alignment requirements as the real event group structure.  It is
++ *  // provided as a mechanism for applications to know the size of the event
++ *  // group (which is dependent on the architecture and configuration file
++ *  // settings) without breaking the strict data hiding policy by exposing the
++ *  // real event group internals.  This StaticEventGroup_t variable is passed
++ *  // into the xSemaphoreCreateEventGroupStatic() function and is used to store
++ *  // the event group's data structures
++ *  StaticEventGroup_t xEventGroupBuffer;
++ *
++ *  // Create the event group without dynamically allocating any memory.
++ *  xEventGroup = xEventGroupCreateStatic( &xEventGroupBuffer );
++ * @endcode
++ */
++#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++    EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer );
++#endif
++
++/**
++ * event_groups.h
++ * @code{c}
++ *  EventBits_t xEventGroupWaitBits(    EventGroupHandle_t xEventGroup,
++ *                                      const EventBits_t uxBitsToWaitFor,
++ *                                      const BaseType_t xClearOnExit,
++ *                                      const BaseType_t xWaitForAllBits,
++ *                                      const TickType_t xTicksToWait );
++ * @endcode
++ *
++ * [Potentially] block to wait for one or more bits to be set within a
++ * previously created event group.
++ *
++ * This function cannot be called from an interrupt.
++ *
++ * @param xEventGroup The event group in which the bits are being tested.  The
++ * event group must have previously been created using a call to
++ * xEventGroupCreate().
++ *
++ * @param uxBitsToWaitFor A bitwise value that indicates the bit or bits to test
++ * inside the event group.  For example, to wait for bit 0 and/or bit 2 set
++ * uxBitsToWaitFor to 0x05.  To wait for bits 0 and/or bit 1 and/or bit 2 set
++ * uxBitsToWaitFor to 0x07.  Etc.
++ *
++ * @param xClearOnExit If xClearOnExit is set to pdTRUE then any bits within
++ * uxBitsToWaitFor that are set within the event group will be cleared before
++ * xEventGroupWaitBits() returns if the wait condition was met (if the function
++ * returns for a reason other than a timeout).  If xClearOnExit is set to
++ * pdFALSE then the bits set in the event group are not altered when the call to
++ * xEventGroupWaitBits() returns.
++ *
++ * @param xWaitForAllBits If xWaitForAllBits is set to pdTRUE then
++ * xEventGroupWaitBits() will return when either all the bits in uxBitsToWaitFor
++ * are set or the specified block time expires.  If xWaitForAllBits is set to
++ * pdFALSE then xEventGroupWaitBits() will return when any one of the bits set
++ * in uxBitsToWaitFor is set or the specified block time expires.  The block
++ * time is specified by the xTicksToWait parameter.
++ *
++ * @param xTicksToWait The maximum amount of time (specified in 'ticks') to wait
++ * for one/all (depending on the xWaitForAllBits value) of the bits specified by
++ * uxBitsToWaitFor to become set.
++ *
++ * @return The value of the event group at the time either the bits being waited
++ * for became set, or the block time expired.  Test the return value to know
++ * which bits were set.  If xEventGroupWaitBits() returned because its timeout
++ * expired then not all the bits being waited for will be set.  If
++ * xEventGroupWaitBits() returned because the bits it was waiting for were set
++ * then the returned value is the event group value before any bits were
++ * automatically cleared in the case that xClearOnExit parameter was set to
++ * pdTRUE.
++ *
++ * Example usage:
++ * @code{c}
++ * #define BIT_0 ( 1 << 0 )
++ * #define BIT_4 ( 1 << 4 )
++ *
++ * void aFunction( EventGroupHandle_t xEventGroup )
++ * {
++ * EventBits_t uxBits;
++ * const TickType_t xTicksToWait = 100 / portTICK_PERIOD_MS;
++ *
++ *      // Wait a maximum of 100ms for either bit 0 or bit 4 to be set within
++ *      // the event group.  Clear the bits before exiting.
++ *      uxBits = xEventGroupWaitBits(
++ *                  xEventGroup,    // The event group being tested.
++ *                  BIT_0 | BIT_4,  // The bits within the event group to wait for.
++ *                  pdTRUE,         // BIT_0 and BIT_4 should be cleared before returning.
++ *                  pdFALSE,        // Don't wait for both bits, either bit will do.
++ *                  xTicksToWait ); // Wait a maximum of 100ms for either bit to be set.
++ *
++ *      if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) )
++ *      {
++ *          // xEventGroupWaitBits() returned because both bits were set.
++ *      }
++ *      else if( ( uxBits & BIT_0 ) != 0 )
++ *      {
++ *          // xEventGroupWaitBits() returned because just BIT_0 was set.
++ *      }
++ *      else if( ( uxBits & BIT_4 ) != 0 )
++ *      {
++ *          // xEventGroupWaitBits() returned because just BIT_4 was set.
++ *      }
++ *      else
++ *      {
++ *          // xEventGroupWaitBits() returned because xTicksToWait ticks passed
++ *          // without either BIT_0 or BIT_4 becoming set.
++ *      }
++ * }
++ * @endcode
++ * \defgroup xEventGroupWaitBits xEventGroupWaitBits
++ * \ingroup EventGroup
++ */
++EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
++                                 const EventBits_t uxBitsToWaitFor,
++                                 const BaseType_t xClearOnExit,
++                                 const BaseType_t xWaitForAllBits,
++                                 TickType_t xTicksToWait );
++
++/**
++ * event_groups.h
++ * @code{c}
++ *  EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear );
++ * @endcode
++ *
++ * Clear bits within an event group.  This function cannot be called from an
++ * interrupt.
++ *
++ * @param xEventGroup The event group in which the bits are to be cleared.
++ *
++ * @param uxBitsToClear A bitwise value that indicates the bit or bits to clear
++ * in the event group.  For example, to clear bit 3 only, set uxBitsToClear to
++ * 0x08.  To clear bit 3 and bit 0 set uxBitsToClear to 0x09.
++ *
++ * @return The value of the event group before the specified bits were cleared.
++ *
++ * Example usage:
++ * @code{c}
++ * #define BIT_0 ( 1 << 0 )
++ * #define BIT_4 ( 1 << 4 )
++ *
++ * void aFunction( EventGroupHandle_t xEventGroup )
++ * {
++ * EventBits_t uxBits;
++ *
++ *      // Clear bit 0 and bit 4 in xEventGroup.
++ *      uxBits = xEventGroupClearBits(
++ *                              xEventGroup,    // The event group being updated.
++ *                              BIT_0 | BIT_4 );// The bits being cleared.
++ *
++ *      if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) )
++ *      {
++ *          // Both bit 0 and bit 4 were set before xEventGroupClearBits() was
++ *          // called.  Both will now be clear (not set).
++ *      }
++ *      else if( ( uxBits & BIT_0 ) != 0 )
++ *      {
++ *          // Bit 0 was set before xEventGroupClearBits() was called.  It will
++ *          // now be clear.
++ *      }
++ *      else if( ( uxBits & BIT_4 ) != 0 )
++ *      {
++ *          // Bit 4 was set before xEventGroupClearBits() was called.  It will
++ *          // now be clear.
++ *      }
++ *      else
++ *      {
++ *          // Neither bit 0 nor bit 4 were set in the first place.
++ *      }
++ * }
++ * @endcode
++ * \defgroup xEventGroupClearBits xEventGroupClearBits
++ * \ingroup EventGroup
++ */
++EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
++                                  const EventBits_t uxBitsToClear );
++
++/**
++ * event_groups.h
++ * @code{c}
++ *  BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet );
++ * @endcode
++ *
++ * A version of xEventGroupClearBits() that can be called from an interrupt.
++ *
++ * Setting bits in an event group is not a deterministic operation because there
++ * are an unknown number of tasks that may be waiting for the bit or bits being
++ * set.  FreeRTOS does not allow nondeterministic operations to be performed
++ * while interrupts are disabled, so protects event groups that are accessed
++ * from tasks by suspending the scheduler rather than disabling interrupts.  As
++ * a result event groups cannot be accessed directly from an interrupt service
++ * routine.  Therefore xEventGroupClearBitsFromISR() sends a message to the
++ * timer task to have the clear operation performed in the context of the timer
++ * task.
++ *
++ * @param xEventGroup The event group in which the bits are to be cleared.
++ *
++ * @param uxBitsToClear A bitwise value that indicates the bit or bits to clear.
++ * For example, to clear bit 3 only, set uxBitsToClear to 0x08.  To clear bit 3
++ * and bit 0 set uxBitsToClear to 0x09.
++ *
++ * @return If the request to execute the function was posted successfully then
++ * pdPASS is returned, otherwise pdFALSE is returned.  pdFALSE will be returned
++ * if the timer service queue was full.
++ *
++ * Example usage:
++ * @code{c}
++ * #define BIT_0 ( 1 << 0 )
++ * #define BIT_4 ( 1 << 4 )
++ *
++ * // An event group which it is assumed has already been created by a call to
++ * // xEventGroupCreate().
++ * EventGroupHandle_t xEventGroup;
++ *
++ * void anInterruptHandler( void )
++ * {
++ *      // Clear bit 0 and bit 4 in xEventGroup.
++ *      xResult = xEventGroupClearBitsFromISR(
++ *                          xEventGroup,     // The event group being updated.
++ *                          BIT_0 | BIT_4 ); // The bits being set.
++ *
++ *      if( xResult == pdPASS )
++ *      {
++ *          // The message was posted successfully.
++ *      }
++ * }
++ * @endcode
++ * \defgroup xEventGroupClearBitsFromISR xEventGroupClearBitsFromISR
++ * \ingroup EventGroup
++ */
++BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup,
++                                        const EventBits_t uxBitsToClear );
++
++/**
++ * event_groups.h
++ * @code{c}
++ *  EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet );
++ * @endcode
++ *
++ * Set bits within an event group.
++ * This function cannot be called from an interrupt.  xEventGroupSetBitsFromISR()
++ * is a version that can be called from an interrupt.
++ *
++ * Setting bits in an event group will automatically unblock tasks that are
++ * blocked waiting for the bits.
++ *
++ * @param xEventGroup The event group in which the bits are to be set.
++ *
++ * @param uxBitsToSet A bitwise value that indicates the bit or bits to set.
++ * For example, to set bit 3 only, set uxBitsToSet to 0x08.  To set bit 3
++ * and bit 0 set uxBitsToSet to 0x09.
++ *
++ * @return The value of the event group at the time the call to
++ * xEventGroupSetBits() returns.  There are two reasons why the returned value
++ * might have the bits specified by the uxBitsToSet parameter cleared.  First,
++ * if setting a bit results in a task that was waiting for the bit leaving the
++ * blocked state then it is possible the bit will be cleared automatically
++ * (see the xClearBitOnExit parameter of xEventGroupWaitBits()).  Second, any
++ * unblocked (or otherwise Ready state) task that has a priority above that of
++ * the task that called xEventGroupSetBits() will execute and may change the
++ * event group value before the call to xEventGroupSetBits() returns.
++ *
++ * Example usage:
++ * @code{c}
++ * #define BIT_0 ( 1 << 0 )
++ * #define BIT_4 ( 1 << 4 )
++ *
++ * void aFunction( EventGroupHandle_t xEventGroup )
++ * {
++ * EventBits_t uxBits;
++ *
++ *      // Set bit 0 and bit 4 in xEventGroup.
++ *      uxBits = xEventGroupSetBits(
++ *                          xEventGroup,    // The event group being updated.
++ *                          BIT_0 | BIT_4 );// The bits being set.
++ *
++ *      if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) )
++ *      {
++ *          // Both bit 0 and bit 4 remained set when the function returned.
++ *      }
++ *      else if( ( uxBits & BIT_0 ) != 0 )
++ *      {
++ *          // Bit 0 remained set when the function returned, but bit 4 was
++ *          // cleared.  It might be that bit 4 was cleared automatically as a
++ *          // task that was waiting for bit 4 was removed from the Blocked
++ *          // state.
++ *      }
++ *      else if( ( uxBits & BIT_4 ) != 0 )
++ *      {
++ *          // Bit 4 remained set when the function returned, but bit 0 was
++ *          // cleared.  It might be that bit 0 was cleared automatically as a
++ *          // task that was waiting for bit 0 was removed from the Blocked
++ *          // state.
++ *      }
++ *      else
++ *      {
++ *          // Neither bit 0 nor bit 4 remained set.  It might be that a task
++ *          // was waiting for both of the bits to be set, and the bits were
++ *          // cleared as the task left the Blocked state.
++ *      }
++ * }
++ * @endcode
++ * \defgroup xEventGroupSetBits xEventGroupSetBits
++ * \ingroup EventGroup
++ */
++EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
++                                const EventBits_t uxBitsToSet );
++
++/**
++ * event_groups.h
++ * @code{c}
++ *  BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken );
++ * @endcode
++ *
++ * A version of xEventGroupSetBits() that can be called from an interrupt.
++ *
++ * Setting bits in an event group is not a deterministic operation because there
++ * are an unknown number of tasks that may be waiting for the bit or bits being
++ * set.  FreeRTOS does not allow nondeterministic operations to be performed in
++ * interrupts or from critical sections.  Therefore xEventGroupSetBitsFromISR()
++ * sends a message to the timer task to have the set operation performed in the
++ * context of the timer task - where a scheduler lock is used in place of a
++ * critical section.
++ *
++ * @param xEventGroup The event group in which the bits are to be set.
++ *
++ * @param uxBitsToSet A bitwise value that indicates the bit or bits to set.
++ * For example, to set bit 3 only, set uxBitsToSet to 0x08.  To set bit 3
++ * and bit 0 set uxBitsToSet to 0x09.
++ *
++ * @param pxHigherPriorityTaskWoken As mentioned above, calling this function
++ * will result in a message being sent to the timer daemon task.  If the
++ * priority of the timer daemon task is higher than the priority of the
++ * currently running task (the task the interrupt interrupted) then
++ * *pxHigherPriorityTaskWoken will be set to pdTRUE by
++ * xEventGroupSetBitsFromISR(), indicating that a context switch should be
++ * requested before the interrupt exits.  For that reason
++ * *pxHigherPriorityTaskWoken must be initialised to pdFALSE.  See the
++ * example code below.
++ *
++ * @return If the request to execute the function was posted successfully then
++ * pdPASS is returned, otherwise pdFALSE is returned.  pdFALSE will be returned
++ * if the timer service queue was full.
++ *
++ * Example usage:
++ * @code{c}
++ * #define BIT_0 ( 1 << 0 )
++ * #define BIT_4 ( 1 << 4 )
++ *
++ * // An event group which it is assumed has already been created by a call to
++ * // xEventGroupCreate().
++ * EventGroupHandle_t xEventGroup;
++ *
++ * void anInterruptHandler( void )
++ * {
++ * BaseType_t xHigherPriorityTaskWoken, xResult;
++ *
++ *      // xHigherPriorityTaskWoken must be initialised to pdFALSE.
++ *      xHigherPriorityTaskWoken = pdFALSE;
++ *
++ *      // Set bit 0 and bit 4 in xEventGroup.
++ *      xResult = xEventGroupSetBitsFromISR(
++ *                          xEventGroup,    // The event group being updated.
++ *                          BIT_0 | BIT_4,  // The bits being set.
++ *                          &xHigherPriorityTaskWoken );
++ *
++ *      // Was the message posted successfully?
++ *      if( xResult == pdPASS )
++ *      {
++ *          // If xHigherPriorityTaskWoken is now set to pdTRUE then a context
++ *          // switch should be requested.  The macro used is port specific and
++ *          // will be either portYIELD_FROM_ISR() or portEND_SWITCHING_ISR() -
++ *          // refer to the documentation page for the port being used.
++ *          portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
++ *      }
++ * }
++ * @endcode
++ * \defgroup xEventGroupSetBitsFromISR xEventGroupSetBitsFromISR
++ * \ingroup EventGroup
++ */
++BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup,
++                                      const EventBits_t uxBitsToSet,
++                                      BaseType_t * pxHigherPriorityTaskWoken );
++
++/**
++ * event_groups.h
++ * @code{c}
++ *  EventBits_t xEventGroupGetBits( EventGroupHandle_t xEventGroup );
++ * @endcode
++ *
++ * Returns the current value of the bits in an event group.  This function
++ * cannot be used from an interrupt.
++ *
++ * @param xEventGroup The event group being queried.
++ *
++ * @return The event group bits at the time xEventGroupGetBits() was called.
++ *
++ * \defgroup xEventGroupGetBits xEventGroupGetBits
++ * \ingroup EventGroup
++ */
++#define xEventGroupGetBits( xEventGroup )    xEventGroupClearBits( xEventGroup, 0 )
++
++/**
++ * event_groups.h
++ * @code{c}
++ *  EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup );
++ * @endcode
++ *
++ * A version of xEventGroupGetBits() that can be called from an ISR.
++ *
++ * @param xEventGroup The event group being queried.
++ *
++ * @return The event group bits at the time xEventGroupGetBitsFromISR() was called.
++ *
++ * \defgroup xEventGroupGetBitsFromISR xEventGroupGetBitsFromISR
++ * \ingroup EventGroup
++ */
++EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup );
++
++/**
++ * event_groups.h
++ * @code{c}
++ *  void vEventGroupDelete( EventGroupHandle_t xEventGroup );
++ * @endcode
++ *
++ * Delete an event group that was previously created by a call to
++ * xEventGroupCreate().  Tasks that are blocked on the event group will be
++ * unblocked and obtain 0 as the event group's value.
++ *
++ * @param xEventGroup The event group being deleted.
++ */
++void vEventGroupDelete( EventGroupHandle_t xEventGroup );
++
++/* *INDENT-OFF* */
++#ifdef __cplusplus
++    }
++#endif
++/* *INDENT-ON* */
++
++#endif /* EVENT_GROUPS_H */
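A minimal end-to-end sketch of the event-group API declared above (illustrative only, not part of the patch; it assumes the dynamic allocation path, configSUPPORT_DYNAMIC_ALLOCATION == 1, and the bit names are hypothetical):

    #include "freertos/FreeRTOS.h"
    #include "freertos/event_groups.h"

    #define EVT_RX_READY    ( 1 << 0 )                   /* hypothetical event bits */
    #define EVT_TX_DONE     ( 1 << 1 )

    static EventGroupHandle_t xEvents;

    void example_event_group_usage( void )
    {
        EventBits_t uxBits;

        xEvents = xEventGroupCreate();                   /* backed by struct rt_event */
        if( xEvents == NULL )
        {
            return;                                      /* allocation failed */
        }

        xEventGroupSetBits( xEvents, EVT_RX_READY );

        /* Block for up to 100 ms waiting for either bit; clear them on exit. */
        uxBits = xEventGroupWaitBits( xEvents, EVT_RX_READY | EVT_TX_DONE,
                                      pdTRUE, pdFALSE, pdMS_TO_TICKS( 100 ) );

        if( ( uxBits & EVT_RX_READY ) != 0 )
        {
            /* RX handling would go here. */
        }

        vEventGroupDelete( xEvents );
    }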
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/list.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/list.h
+new file mode 100644
+index 0000000000..dde1f3572e
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/list.h
+@@ -0,0 +1,416 @@
++/*
++ * FreeRTOS Kernel V10.4.3
++ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++/*
++ * This is the list implementation used by the scheduler.  While it is tailored
++ * heavily for the scheduler's needs, it is also available for use by
++ * application code.
++ *
++ * list_ts can only store pointers to list_item_ts.  Each ListItem_t contains a
++ * numeric value (xItemValue).  Most of the time the lists are sorted in
++ * descending item value order.
++ *
++ * Lists are created already containing one list item.  The value of this
++ * item is the maximum possible that can be stored, it is therefore always at
++ * the end of the list and acts as a marker.  The list member pxHead always
++ * points to this marker - even though it is at the tail of the list.  This
++ * is because the tail contains a wrap back pointer to the true head of
++ * the list.
++ *
++ * In addition to its value, each list item contains a pointer to the next
++ * item in the list (pxNext), a pointer to the list it is in (pxContainer)
++ * and a pointer back to the object that contains it.  These latter two
++ * pointers are included for efficiency of list manipulation.  There is
++ * effectively a two way link between the object containing the list item and
++ * the list item itself.
++ *
++ *
++ * \page ListIntroduction List Implementation
++ * \ingroup FreeRTOSIntro
++ */
++
++#ifndef INC_FREERTOS_H
++    #error "FreeRTOS.h must be included before list.h"
++#endif
++
++#ifndef LIST_H
++#define LIST_H
++
++/*
++ * The list structure members are modified from within interrupts, and therefore
++ * by rights should be declared volatile.  However, they are only modified in a
++ * functionally atomic way (within critical sections or with the scheduler
++ * suspended) and are either passed by reference into a function or indexed via
++ * a volatile variable.  Therefore, in all use cases tested so far, the volatile
++ * qualifier can be omitted in order to provide a moderate performance
++ * improvement without adversely affecting functional behaviour.  The assembly
++ * instructions generated by the IAR, ARM and GCC compilers when the respective
++ * compiler's options were set for maximum optimisation has been inspected and
++ * deemed to be as intended.  That said, as compiler technology advances, and
++ * especially if aggressive cross module optimisation is used (a use case that
++ * has not been exercised to any great extent) then it is feasible that the
++ * volatile qualifier will be needed for correct optimisation.  It is expected
++ * that a compiler removing essential code because, without the volatile
++ * qualifier on the list structure members and with aggressive cross module
++ * optimisation, the compiler deemed the code unnecessary will result in
++ * complete and obvious failure of the scheduler.  If this is ever experienced
++ * then the volatile qualifier can be inserted in the relevant places within the
++ * list structures by simply defining configLIST_VOLATILE to volatile in
++ * FreeRTOSConfig.h (as per the example at the bottom of this comment block).
++ * If configLIST_VOLATILE is not defined then the preprocessor directives below
++ * will simply #define configLIST_VOLATILE away completely.
++ *
++ * To use volatile list structure members then add the following line to
++ * FreeRTOSConfig.h (without the quotes):
++ * "#define configLIST_VOLATILE volatile"
++ */
++#ifndef configLIST_VOLATILE
++    #define configLIST_VOLATILE
++#endif /* configLIST_VOLATILE */
++
++/* *INDENT-OFF* */
++#ifdef __cplusplus
++    extern "C" {
++#endif
++/* *INDENT-ON* */
++
++/* Macros that can be used to place known values within the list structures,
++ * then check that the known values do not get corrupted during the execution of
++ * the application.   These may catch the list data structures being overwritten in
++ * memory.  They will not catch data errors caused by incorrect configuration or
++ * use of FreeRTOS.*/
++#if ( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 0 )
++    /* Define the macros to do nothing. */
++    #define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE
++    #define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE
++    #define listFIRST_LIST_INTEGRITY_CHECK_VALUE
++    #define listSECOND_LIST_INTEGRITY_CHECK_VALUE
++    #define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem )
++    #define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem )
++    #define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList )
++    #define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList )
++    #define listTEST_LIST_ITEM_INTEGRITY( pxItem )
++    #define listTEST_LIST_INTEGRITY( pxList )
++#else /* if ( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 0 ) */
++    /* Define macros that add new members into the list structures. */
++    #define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE     TickType_t xListItemIntegrityValue1;
++    #define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE    TickType_t xListItemIntegrityValue2;
++    #define listFIRST_LIST_INTEGRITY_CHECK_VALUE          TickType_t xListIntegrityValue1;
++    #define listSECOND_LIST_INTEGRITY_CHECK_VALUE         TickType_t xListIntegrityValue2;
++
++/* Define macros that set the new structure members to known values. */
++    #define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem )     ( pxItem )->xListItemIntegrityValue1 = pdINTEGRITY_CHECK_VALUE
++    #define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem )    ( pxItem )->xListItemIntegrityValue2 = pdINTEGRITY_CHECK_VALUE
++    #define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList )              ( pxList )->xListIntegrityValue1 = pdINTEGRITY_CHECK_VALUE
++    #define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList )              ( pxList )->xListIntegrityValue2 = pdINTEGRITY_CHECK_VALUE
++
++/* Define macros that will assert if one of the structure members does not
++ * contain its expected value. */
++    #define listTEST_LIST_ITEM_INTEGRITY( pxItem )                      configASSERT( ( ( pxItem )->xListItemIntegrityValue1 == pdINTEGRITY_CHECK_VALUE ) && ( ( pxItem )->xListItemIntegrityValue2 == pdINTEGRITY_CHECK_VALUE ) )
++    #define listTEST_LIST_INTEGRITY( pxList )                           configASSERT( ( ( pxList )->xListIntegrityValue1 == pdINTEGRITY_CHECK_VALUE ) && ( ( pxList )->xListIntegrityValue2 == pdINTEGRITY_CHECK_VALUE ) )
++#endif /* configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES */
++
++
++/*
++ * Definition of the only type of object that a list can contain.
++ */
++struct xLIST;
++struct xLIST_ITEM
++{
++    listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE               /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
++    configLIST_VOLATILE TickType_t xItemValue;              /*< The value being listed.  In most cases this is used to sort the list in descending order. */
++    struct xLIST_ITEM * configLIST_VOLATILE pxNext;         /*< Pointer to the next ListItem_t in the list. */
++    struct xLIST_ITEM * configLIST_VOLATILE pxPrevious;     /*< Pointer to the previous ListItem_t in the list. */
++    void * pvOwner;                                         /*< Pointer to the object (normally a TCB) that contains the list item.  There is therefore a two way link between the object containing the list item and the list item itself. */
++    struct xLIST * configLIST_VOLATILE pxContainer;         /*< Pointer to the list in which this list item is placed (if any). */
++    listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE              /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
++};
++typedef struct xLIST_ITEM ListItem_t;                       /* For some reason lint wants this as two separate definitions. */
++
++struct xMINI_LIST_ITEM
++{
++    listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE     /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
++    configLIST_VOLATILE TickType_t xItemValue;
++    struct xLIST_ITEM * configLIST_VOLATILE pxNext;
++    struct xLIST_ITEM * configLIST_VOLATILE pxPrevious;
++};
++typedef struct xMINI_LIST_ITEM MiniListItem_t;
++
++/*
++ * Definition of the type of queue used by the scheduler.
++ */
++typedef struct xLIST
++{
++    listFIRST_LIST_INTEGRITY_CHECK_VALUE          /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
++    volatile UBaseType_t uxNumberOfItems;
++    ListItem_t * configLIST_VOLATILE pxIndex;     /*< Used to walk through the list.  Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */
++    MiniListItem_t xListEnd;                      /*< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */
++    listSECOND_LIST_INTEGRITY_CHECK_VALUE         /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
++} List_t;
++
++/*
++ * Access macro to set the owner of a list item.  The owner of a list item
++ * is the object (usually a TCB) that contains the list item.
++ *
++ * \page listSET_LIST_ITEM_OWNER listSET_LIST_ITEM_OWNER
++ * \ingroup LinkedList
++ */
++#define listSET_LIST_ITEM_OWNER( pxListItem, pxOwner )    ( ( pxListItem )->pvOwner = ( void * ) ( pxOwner ) )
++
++/*
++ * Access macro to get the owner of a list item.  The owner of a list item
++ * is the object (usually a TCB) that contains the list item.
++ *
++ * \page listGET_LIST_ITEM_OWNER listGET_LIST_ITEM_OWNER
++ * \ingroup LinkedList
++ */
++#define listGET_LIST_ITEM_OWNER( pxListItem )             ( ( pxListItem )->pvOwner )
++
++/*
++ * Access macro to set the value of the list item.  In most cases the value is
++ * used to sort the list in descending order.
++ *
++ * \page listSET_LIST_ITEM_VALUE listSET_LIST_ITEM_VALUE
++ * \ingroup LinkedList
++ */
++#define listSET_LIST_ITEM_VALUE( pxListItem, xValue )     ( ( pxListItem )->xItemValue = ( xValue ) )
++
++/*
++ * Access macro to retrieve the value of the list item.  The value can
++ * represent anything - for example the priority of a task, or the time at
++ * which a task should be unblocked.
++ *
++ * \page listGET_LIST_ITEM_VALUE listGET_LIST_ITEM_VALUE
++ * \ingroup LinkedList
++ */
++#define listGET_LIST_ITEM_VALUE( pxListItem )             ( ( pxListItem )->xItemValue )
++
++/*
++ * Access macro to retrieve the value of the list item at the head of a given
++ * list.
++ *
++ * \page listGET_ITEM_VALUE_OF_HEAD_ENTRY listGET_ITEM_VALUE_OF_HEAD_ENTRY
++ * \ingroup LinkedList
++ */
++#define listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxList )        ( ( ( pxList )->xListEnd ).pxNext->xItemValue )
++
++/*
++ * Return the list item at the head of the list.
++ *
++ * \page listGET_HEAD_ENTRY listGET_HEAD_ENTRY
++ * \ingroup LinkedList
++ */
++#define listGET_HEAD_ENTRY( pxList )                      ( ( ( pxList )->xListEnd ).pxNext )
++
++/*
++ * Return the next list item.
++ *
++ * \page listGET_NEXT listGET_NEXT
++ * \ingroup LinkedList
++ */
++#define listGET_NEXT( pxListItem )                        ( ( pxListItem )->pxNext )
++
++/*
++ * Return the list item that marks the end of the list
++ *
++ * \page listGET_END_MARKER listGET_END_MARKER
++ * \ingroup LinkedList
++ */
++#define listGET_END_MARKER( pxList )                      ( ( ListItem_t const * ) ( &( ( pxList )->xListEnd ) ) )
++
++/*
++ * Access macro to determine if a list contains any items.  The macro will
++ * only have the value true if the list is empty.
++ *
++ * \page listLIST_IS_EMPTY listLIST_IS_EMPTY
++ * \ingroup LinkedList
++ */
++#define listLIST_IS_EMPTY( pxList )                       ( ( ( pxList )->uxNumberOfItems == ( UBaseType_t ) 0 ) ? pdTRUE : pdFALSE )
++
++/*
++ * Access macro to return the number of items in the list.
++ */
++#define listCURRENT_LIST_LENGTH( pxList )                 ( ( pxList )->uxNumberOfItems )
++
++/*
++ * Access function to obtain the owner of the next entry in a list.
++ *
++ * The list member pxIndex is used to walk through a list.  Calling
++ * listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list
++ * and returns that entry's pxOwner parameter.  Using multiple calls to this
++ * function it is therefore possible to move through every item contained in
++ * a list.
++ *
++ * The pxOwner parameter of a list item is a pointer to the object that owns
++ * the list item.  In the scheduler this is normally a task control block.
++ * The pxOwner parameter effectively creates a two way link between the list
++ * item and its owner.
++ *
++ * @param pxTCB pxTCB is set to the address of the owner of the next list item.
++ * @param pxList The list from which the next item owner is to be returned.
++ *
++ * \page listGET_OWNER_OF_NEXT_ENTRY listGET_OWNER_OF_NEXT_ENTRY
++ * \ingroup LinkedList
++ */
++#define listGET_OWNER_OF_NEXT_ENTRY( pxTCB, pxList )                                           \
++    {                                                                                          \
++        List_t * const pxConstList = ( pxList );                                               \
++        /* Increment the index to the next item and return the item, ensuring */               \
++        /* we don't return the marker used at the end of the list.  */                         \
++        ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext;                           \
++        if( ( void * ) ( pxConstList )->pxIndex == ( void * ) &( ( pxConstList )->xListEnd ) ) \
++        {                                                                                      \
++            ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext;                       \
++        }                                                                                      \
++        ( pxTCB ) = ( pxConstList )->pxIndex->pvOwner;                                         \
++    }
++
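++/* Usage sketch (illustrative only, not part of the upstream header): walking a
++ * list with listGET_OWNER_OF_NEXT_ENTRY().  The list and the visit function are
++ * assumptions made up for this example.
++ *
++ * @code{c}
++ * void vVisitEveryOwner( List_t * pxList )
++ * {
++ *     void * pvOwner;
++ *     UBaseType_t ux;
++ *
++ *     // One call per item returns each owner in turn; the end marker is skipped
++ *     // automatically by the macro.
++ *     for( ux = 0; ux < listCURRENT_LIST_LENGTH( pxList ); ux++ )
++ *     {
++ *         listGET_OWNER_OF_NEXT_ENTRY( pvOwner, pxList );
++ *         // ... use pvOwner here ...
++ *     }
++ * }
++ * @endcode
++ */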
++
++/*
++ * Access function to obtain the owner of the first entry in a list.  Lists
++ * are normally sorted in ascending item value order.
++ *
++ * This function returns the pxOwner member of the first item in the list.
++ * The pxOwner parameter of a list item is a pointer to the object that owns
++ * the list item.  In the scheduler this is normally a task control block.
++ * The pxOwner parameter effectively creates a two way link between the list
++ * item and its owner.
++ *
++ * @param pxList The list from which the owner of the head item is to be
++ * returned.
++ *
++ * \page listGET_OWNER_OF_HEAD_ENTRY listGET_OWNER_OF_HEAD_ENTRY
++ * \ingroup LinkedList
++ */
++#define listGET_OWNER_OF_HEAD_ENTRY( pxList )            ( ( &( ( pxList )->xListEnd ) )->pxNext->pvOwner )
++
++/*
++ * Check to see if a list item is within a list.  The list item maintains a
++ * "container" pointer that points to the list it is in.  All this macro does
++ * is check to see if the container and the list match.
++ *
++ * @param pxList The list we want to know if the list item is within.
++ * @param pxListItem The list item we want to know if is in the list.
++ * @return pdTRUE if the list item is in the list, otherwise pdFALSE.
++ */
++#define listIS_CONTAINED_WITHIN( pxList, pxListItem )    ( ( ( pxListItem )->pxContainer == ( pxList ) ) ? ( pdTRUE ) : ( pdFALSE ) )
++
++/*
++ * Return the list a list item is contained within (referenced from).
++ *
++ * @param pxListItem The list item being queried.
++ * @return A pointer to the List_t object that references the pxListItem
++ */
++#define listLIST_ITEM_CONTAINER( pxListItem )            ( ( pxListItem )->pxContainer )
++
++/*
++ * This provides a crude means of knowing if a list has been initialised, as
++ * pxList->xListEnd.xItemValue is set to portMAX_DELAY by the vListInitialise()
++ * function.
++ */
++#define listLIST_IS_INITIALISED( pxList )                ( ( pxList )->xListEnd.xItemValue == portMAX_DELAY )
++
++/*
++ * Must be called before a list is used!  This initialises all the members
++ * of the list structure and inserts the xListEnd item into the list as a
++ * marker to the back of the list.
++ *
++ * @param pxList Pointer to the list being initialised.
++ *
++ * \page vListInitialise vListInitialise
++ * \ingroup LinkedList
++ */
++void vListInitialise( List_t * const pxList );
++
++/*
++ * Must be called before a list item is used.  This sets the list container to
++ * null so the item does not think that it is already contained in a list.
++ *
++ * @param pxItem Pointer to the list item being initialised.
++ *
++ * \page vListInitialiseItem vListInitialiseItem
++ * \ingroup LinkedList
++ */
++void vListInitialiseItem( ListItem_t * const pxItem );
++
++/*
++ * Insert a list item into a list.  The item will be inserted into the list in
++ * a position determined by its item value (ascending item value order).
++ *
++ * @param pxList The list into which the item is to be inserted.
++ *
++ * @param pxNewListItem The item that is to be placed in the list.
++ *
++ * \page vListInsert vListInsert
++ * \ingroup LinkedList
++ */
++void vListInsert( List_t * const pxList,
++                  ListItem_t * const pxNewListItem );
++
++/*
++ * Insert a list item into a list.  The item will be inserted in a position
++ * such that it will be the last item within the list returned by multiple
++ * calls to listGET_OWNER_OF_NEXT_ENTRY.
++ *
++ * The list member pxIndex is used to walk through a list.  Calling
++ * listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list.
++ * Placing an item in a list using vListInsertEnd effectively places the item
++ * in the list position pointed to by pxIndex.  This means that every other
++ * item within the list will be returned by listGET_OWNER_OF_NEXT_ENTRY before
++ * the pxIndex parameter again points to the item being inserted.
++ *
++ * @param pxList The list into which the item is to be inserted.
++ *
++ * @param pxNewListItem The list item to be inserted into the list.
++ *
++ * \page vListInsertEnd vListInsertEnd
++ * \ingroup LinkedList
++ */
++void vListInsertEnd( List_t * const pxList,
++                     ListItem_t * const pxNewListItem );
++
++/*
++ * Remove an item from a list.  The list item has a pointer to the list that
++ * it is in, so only the list item need be passed into the function.
++ *
++ * @param pxItemToRemove The item to be removed.  The item will remove itself from
++ * the list pointed to by its pxContainer parameter.
++ *
++ * @return The number of items that remain in the list after the list item has
++ * been removed.
++ *
++ * \page uxListRemove uxListRemove
++ * \ingroup LinkedList
++ */
++UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove );
++
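++/* Usage sketch (illustrative only, not part of the upstream header): the typical
++ * initialise/insert/remove sequence.  The owner pointer and item value are made
++ * up for this example.
++ *
++ * @code{c}
++ * static List_t xMyList;
++ * static ListItem_t xMyItem;
++ *
++ * void vListExample( void * pvMyOwner )
++ * {
++ *     vListInitialise( &xMyList );        // Must be called before the list is used.
++ *     vListInitialiseItem( &xMyItem );    // Marks the item as not contained in any list.
++ *
++ *     listSET_LIST_ITEM_OWNER( &xMyItem, pvMyOwner );
++ *     listSET_LIST_ITEM_VALUE( &xMyItem, 10 );
++ *
++ *     vListInsert( &xMyList, &xMyItem );  // Position determined by the item value.
++ *     ( void ) uxListRemove( &xMyItem );  // Returns the number of items left in the list.
++ * }
++ * @endcode
++ */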
++/* *INDENT-OFF* */
++#ifdef __cplusplus
++    }
++#endif
++/* *INDENT-ON* */
++
++#endif /* ifndef LIST_H */
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/portable.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/portable.h
+new file mode 100644
+index 0000000000..f0b56ae166
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/portable.h
+@@ -0,0 +1,141 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++/*-----------------------------------------------------------
++* Portable layer API.  Each function must be defined for each port.
++*----------------------------------------------------------*/
++
++#ifndef PORTABLE_H
++#define PORTABLE_H
++
++#include "freertos/portmacro.h"
++
++#if portBYTE_ALIGNMENT == 32
++    #define portBYTE_ALIGNMENT_MASK    ( 0x001f )
++#elif portBYTE_ALIGNMENT == 16
++    #define portBYTE_ALIGNMENT_MASK    ( 0x000f )
++#elif portBYTE_ALIGNMENT == 8
++    #define portBYTE_ALIGNMENT_MASK    ( 0x0007 )
++#elif portBYTE_ALIGNMENT == 4
++    #define portBYTE_ALIGNMENT_MASK    ( 0x0003 )
++#elif portBYTE_ALIGNMENT == 2
++    #define portBYTE_ALIGNMENT_MASK    ( 0x0001 )
++#elif portBYTE_ALIGNMENT == 1
++    #define portBYTE_ALIGNMENT_MASK    ( 0x0000 )
++#else /* if portBYTE_ALIGNMENT == 32 */
++    #error "Invalid portBYTE_ALIGNMENT definition"
++#endif /* if portBYTE_ALIGNMENT == 32 */
++
++/* *INDENT-OFF* */
++#ifdef __cplusplus
++    extern "C" {
++#endif
++/* *INDENT-ON* */
++
++#ifdef configUSE_FREERTOS_PROVIDED_HEAP
++
++/* Used by heap_5.c to define the start address and size of each memory region
++ * that together comprise the total FreeRTOS heap space. */
++typedef struct HeapRegion
++{
++    uint8_t * pucStartAddress;
++    size_t xSizeInBytes;
++} HeapRegion_t;
++
++/* Used to pass information about the heap out of vPortGetHeapStats(). */
++typedef struct xHeapStats
++{
++    size_t xAvailableHeapSpaceInBytes;          /* The total heap size currently available - this is the sum of all the free blocks, not the largest block that can be allocated. */
++    size_t xSizeOfLargestFreeBlockInBytes;      /* The maximum size, in bytes, of all the free blocks within the heap at the time vPortGetHeapStats() is called. */
++    size_t xSizeOfSmallestFreeBlockInBytes;     /* The minimum size, in bytes, of all the free blocks within the heap at the time vPortGetHeapStats() is called. */
++    size_t xNumberOfFreeBlocks;                 /* The number of free memory blocks within the heap at the time vPortGetHeapStats() is called. */
++    size_t xMinimumEverFreeBytesRemaining;      /* The minimum amount of total free memory (sum of all free blocks) there has been in the heap since the system booted. */
++    size_t xNumberOfSuccessfulAllocations;      /* The number of calls to pvPortMalloc() that have returned a valid memory block. */
++    size_t xNumberOfSuccessfulFrees;            /* The number of calls to vPortFree() that have successfully freed a block of memory. */
++} HeapStats_t;
++
++/*
++ * Used to define multiple heap regions for use by heap_5.c.  This function
++ * must be called before any calls to pvPortMalloc() - note that creating a
++ * task, queue, semaphore, mutex, software timer, event group, etc. will itself
++ * result in pvPortMalloc() being called.
++ *
++ * pxHeapRegions passes in an array of HeapRegion_t structures - each of which
++ * defines a region of memory that can be used as the heap.  The array is
++ * terminated by a HeapRegion_t structure that has a size of 0.  The region
++ * with the lowest start address must appear first in the array.
++ */
++void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) PRIVILEGED_FUNCTION;
++
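++/* Usage sketch (illustrative only, not part of the upstream header): a heap_5
++ * region table.  The addresses and sizes below are invented for the example;
++ * real values are board specific.
++ *
++ * @code{c}
++ * static uint8_t ucHeap1[ 0x4000 ];
++ *
++ * const HeapRegion_t xHeapRegions[] =
++ * {
++ *     { ucHeap1,                  sizeof( ucHeap1 ) },   // Lowest address first.
++ *     { ( uint8_t * ) 0x20010000, 0x8000 },              // A second, higher region.
++ *     { NULL,                     0 }                    // Zero-sized terminator.
++ * };
++ *
++ * // Must run before the first pvPortMalloc(), i.e. before creating any task,
++ * // queue, semaphore, etc. that allocates dynamically.
++ * vPortDefineHeapRegions( xHeapRegions );
++ * @endcode
++ */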
++/*
++ * Returns a HeapStats_t structure filled with information about the current
++ * heap state.
++ */
++void vPortGetHeapStats( HeapStats_t * pxHeapStats );
++
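++/* Usage sketch (illustrative only, not part of the upstream header): obtaining
++ * a snapshot of the heap state.
++ *
++ * @code{c}
++ * HeapStats_t xStats;
++ *
++ * vPortGetHeapStats( &xStats );
++ * // xStats.xAvailableHeapSpaceInBytes now holds the total free space, and
++ * // xStats.xSizeOfLargestFreeBlockInBytes the largest single allocation possible.
++ * @endcode
++ */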
++/*
++ * Map to the memory management routines required for the port.
++ */
++void * pvPortMalloc( size_t xSize ) PRIVILEGED_FUNCTION;
++void vPortFree( void * pv ) PRIVILEGED_FUNCTION;
++void vPortInitialiseBlocks( void ) PRIVILEGED_FUNCTION;
++size_t xPortGetFreeHeapSize( void ) PRIVILEGED_FUNCTION;
++size_t xPortGetMinimumEverFreeHeapSize( void ) PRIVILEGED_FUNCTION;
++
++#if( configSTACK_ALLOCATION_FROM_SEPARATE_HEAP == 1 )
++    void *pvPortMallocStack( size_t xSize ) PRIVILEGED_FUNCTION;
++    void vPortFreeStack( void *pv ) PRIVILEGED_FUNCTION;
++#else
++    #define pvPortMallocStack pvPortMalloc
++    #define vPortFreeStack vPortFree
++#endif
++#else  // configUSE_FREERTOS_PROVIDED_HEAP
++
++/*
++ * Map to the memory management routines required for the port.
++ *
++ * Note that libc standard malloc/free are also available for
++ * non-FreeRTOS-specific code, and behave the same as
++ * pvPortMalloc()/vPortFree().
++ */
++#define pvPortMalloc malloc
++#define vPortFree free
++#define xPortGetFreeHeapSize esp_get_free_heap_size
++#define xPortGetMinimumEverFreeHeapSize esp_get_minimum_free_heap_size
++
++#endif
++
++void vPortEndScheduler( void );
++
++/* *INDENT-OFF* */
++#ifdef __cplusplus
++    }
++#endif
++/* *INDENT-ON* */
++
++#endif /* PORTABLE_H */
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/projdefs.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/projdefs.h
+new file mode 100644
+index 0000000000..8b7c01bee7
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/projdefs.h
+@@ -0,0 +1,64 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++#ifndef PROJDEFS_H
++#define PROJDEFS_H
++
++/*
++ * Defines the prototype to which task functions must conform.  Defined in this
++ * file to ensure the type is known before portable.h is included.
++ */
++typedef void (* TaskFunction_t)( void * );
++
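++/* Illustrative sketch (not part of the upstream header): a task entry point
++ * conforming to TaskFunction_t.  The task name is made up for this example.
++ *
++ * @code{c}
++ * static void vExampleTask( void * pvParameters )
++ * {
++ *     ( void ) pvParameters;
++ *
++ *     for( ;; )
++ *     {
++ *         // Task work goes here; a task function must never return.
++ *     }
++ * }
++ * @endcode
++ */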
++/* Converts a time in milliseconds to a time in ticks.  This macro can be
++ * overridden by a macro of the same name defined in FreeRTOSConfig.h in case the
++ * definition here is not suitable for your application. */
++#ifndef pdMS_TO_TICKS
++    #define pdMS_TO_TICKS( xTimeInMs )    ( ( TickType_t ) rt_tick_from_millisecond( (rt_int32_t) xTimeInMs ) )
++#endif
++
++#ifdef ESP_PLATFORM
++#ifndef pdTICKS_TO_MS
++    #define pdTICKS_TO_MS( xTicks )   ( ( uint32_t ) ( xTicks ) * 1000 / configTICK_RATE_HZ )
++#endif
++#endif // ESP_PLATFORM
++
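++/* Usage sketch (illustrative only, not part of the upstream header): converting
++ * between milliseconds and ticks when specifying a block time.  In this wrapper
++ * pdMS_TO_TICKS() is backed by RT-Thread's rt_tick_from_millisecond().
++ *
++ * @code{c}
++ * const TickType_t xDelay = pdMS_TO_TICKS( 500 );   // 500 ms expressed in ticks.
++ * vTaskDelay( xDelay );                             // Block for roughly 500 ms.
++ * @endcode
++ */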
++#define pdFALSE                                  ( ( BaseType_t ) 0 )
++#define pdTRUE                                   ( ( BaseType_t ) 1 )
++
++#define pdPASS                                   ( pdTRUE )
++#define pdFAIL                                   ( pdFALSE )
++#define errQUEUE_EMPTY                           ( ( BaseType_t ) 0 )
++#define errQUEUE_FULL                            ( ( BaseType_t ) 0 )
++
++/* FreeRTOS error definitions. */
++#define errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY    ( -1 )
++#define errQUEUE_BLOCKED                         ( -4 )
++#define errQUEUE_YIELD                           ( -5 )
++
++#endif /* PROJDEFS_H */
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/queue.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/queue.h
+new file mode 100644
+index 0000000000..57fe10ee8d
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/queue.h
+@@ -0,0 +1,1188 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++
++#ifndef QUEUE_H
++#define QUEUE_H
++
++/* *INDENT-OFF* */
++#ifdef __cplusplus
++    extern "C" {
++#endif
++/* *INDENT-ON* */
++
++#include "task.h"
++
++/**
++ * Type by which queues are referenced.  For example, a call to xQueueCreate()
++ * returns a QueueHandle_t variable that can then be used as a parameter to
++ * xQueueSend(), xQueueReceive(), etc.
++ */
++struct QueueDefinition; /* Using old naming convention so as not to break kernel aware debuggers. */
++typedef struct QueueDefinition   * QueueHandle_t;
++
++/* For internal use only. */
++#define queueSEND_TO_BACK                     ( ( BaseType_t ) 0 )
++#define queueSEND_TO_FRONT                    ( ( BaseType_t ) 1 )
++#define queueOVERWRITE                        ( ( BaseType_t ) 2 )
++
++/* For internal use only.  These definitions *must* match those in queue.c. */
++#define queueQUEUE_TYPE_BASE                  ( ( uint8_t ) 0U )
++#define queueQUEUE_TYPE_SET                   ( ( uint8_t ) 0U )
++#define queueQUEUE_TYPE_MUTEX                 ( ( uint8_t ) 1U )
++#define queueQUEUE_TYPE_COUNTING_SEMAPHORE    ( ( uint8_t ) 2U )
++#define queueQUEUE_TYPE_BINARY_SEMAPHORE      ( ( uint8_t ) 3U )
++#define queueQUEUE_TYPE_RECURSIVE_MUTEX       ( ( uint8_t ) 4U )
++
++/**
++ * queue. h
++ * @code{c}
++ * QueueHandle_t xQueueCreate(
++ *                            UBaseType_t uxQueueLength,
++ *                            UBaseType_t uxItemSize
++ *                        );
++ * @endcode
++ *
++ * Creates a new queue instance, and returns a handle by which the new queue
++ * can be referenced.
++ *
++ * Internally, within the FreeRTOS implementation, queues use two blocks of
++ * memory.  The first block is used to hold the queue's data structures.  The
++ * second block is used to hold items placed into the queue.  If a queue is
++ * created using xQueueCreate() then both blocks of memory are automatically
++ * dynamically allocated inside the xQueueCreate() function.  (see
++ * https://www.FreeRTOS.org/a00111.html).  If a queue is created using
++ * xQueueCreateStatic() then the application writer must provide the memory that
++ * will get used by the queue.  xQueueCreateStatic() therefore allows a queue to
++ * be created without using any dynamic memory allocation.
++ *
++ * https://www.FreeRTOS.org/Embedded-RTOS-Queues.html
++ *
++ * @param uxQueueLength The maximum number of items that the queue can contain.
++ *
++ * @param uxItemSize The number of bytes each item in the queue will require.
++ * Items are queued by copy, not by reference, so this is the number of bytes
++ * that will be copied for each posted item.  Each item on the queue must be
++ * the same size.
++ *
++ * @return If the queue is successfully created then a handle to the newly
++ * created queue is returned.  If the queue cannot be created then 0 is
++ * returned.
++ *
++ * Example usage:
++ * @code{c}
++ * struct AMessage
++ * {
++ *  char ucMessageID;
++ *  char ucData[ 20 ];
++ * };
++ *
++ * void vATask( void *pvParameters )
++ * {
++ * QueueHandle_t xQueue1, xQueue2;
++ *
++ *  // Create a queue capable of containing 10 uint32_t values.
++ *  xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
++ *  if( xQueue1 == 0 )
++ *  {
++ *      // Queue was not created and must not be used.
++ *  }
++ *
++ *  // Create a queue capable of containing 10 pointers to AMessage structures.
++ *  // These should be passed by pointer as they contain a lot of data.
++ *  xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
++ *  if( xQueue2 == 0 )
++ *  {
++ *      // Queue was not created and must not be used.
++ *  }
++ *
++ *  // ... Rest of task code.
++ * }
++ * @endcode
++ * \defgroup xQueueCreate xQueueCreate
++ * \ingroup QueueManagement
++ */
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++    #define xQueueCreate( uxQueueLength, uxItemSize )    xQueueGenericCreate( ( uxQueueLength ), ( uxItemSize ), ( queueQUEUE_TYPE_BASE ) )
++#endif
++
++/**
++ * queue. h
++ * @code{c}
++ * QueueHandle_t xQueueCreateStatic(
++ *                            UBaseType_t uxQueueLength,
++ *                            UBaseType_t uxItemSize,
++ *                            uint8_t *pucQueueStorage,
++ *                            StaticQueue_t *pxQueueBuffer
++ *                        );
++ * @endcode
++ *
++ * Creates a new queue instance, and returns a handle by which the new queue
++ * can be referenced.
++ *
++ * Internally, within the FreeRTOS implementation, queues use two blocks of
++ * memory.  The first block is used to hold the queue's data structures.  The
++ * second block is used to hold items placed into the queue.  If a queue is
++ * created using xQueueCreate() then both blocks of memory are automatically
++ * dynamically allocated inside the xQueueCreate() function.  (see
++ * https://www.FreeRTOS.org/a00111.html).  If a queue is created using
++ * xQueueCreateStatic() then the application writer must provide the memory that
++ * will get used by the queue.  xQueueCreateStatic() therefore allows a queue to
++ * be created without using any dynamic memory allocation.
++ *
++ * https://www.FreeRTOS.org/Embedded-RTOS-Queues.html
++ *
++ * @param uxQueueLength The maximum number of items that the queue can contain.
++ *
++ * @param uxItemSize The number of bytes each item in the queue will require.
++ * Items are queued by copy, not by reference, so this is the number of bytes
++ * that will be copied for each posted item.  Each item on the queue must be
++ * the same size.
++ *
++ * @param pucQueueStorage If uxItemSize is not zero then
++ * pucQueueStorage must point to a uint8_t array that is at least large
++ * enough to hold the maximum number of items that can be in the queue at any
++ * one time - which is ( uxQueueLength * uxItemSize ) bytes.  If uxItemSize is
++ * zero then pucQueueStorage can be NULL.
++ *
++ * @param pxQueueBuffer Must point to a variable of type StaticQueue_t, which
++ * will be used to hold the queue's data structure.
++ *
++ * @return If the queue is created then a handle to the created queue is
++ * returned.  If pxQueueBuffer is NULL then NULL is returned.
++ *
++ * Example usage:
++ * @code{c}
++ * struct AMessage
++ * {
++ *  char ucMessageID;
++ *  char ucData[ 20 ];
++ * };
++ *
++ * #define QUEUE_LENGTH 10
++ * #define ITEM_SIZE sizeof( uint32_t )
++ *
++ * // xQueueBuffer will hold the queue structure.
++ * StaticQueue_t xQueueBuffer;
++ *
++ * // ucQueueStorage will hold the items posted to the queue.  Must be at least
++ * // [(queue length) * ( queue item size)] bytes long.
++ * uint8_t ucQueueStorage[ QUEUE_LENGTH * ITEM_SIZE ];
++ *
++ * void vATask( void *pvParameters )
++ * {
++ *  QueueHandle_t xQueue1;
++ *
++ *  // Create a queue capable of containing 10 uint32_t values.
++ *  xQueue1 = xQueueCreateStatic( QUEUE_LENGTH, // The number of items the queue can hold.
++ *                                ITEM_SIZE,    // The size of each item in the queue.
++ *                                &( ucQueueStorage[ 0 ] ), // The buffer that will hold the items in the queue.
++ *                                &xQueueBuffer ); // The buffer that will hold the queue structure.
++ *
++ *  // The queue is guaranteed to be created successfully as no dynamic memory
++ *  // allocation is used.  Therefore xQueue1 is now a handle to a valid queue.
++ *
++ *  // ... Rest of task code.
++ * }
++ * @endcode
++ * \defgroup xQueueCreateStatic xQueueCreateStatic
++ * \ingroup QueueManagement
++ */
++#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++    #define xQueueCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxQueueBuffer )    xQueueGenericCreateStatic( ( uxQueueLength ), ( uxItemSize ), ( pucQueueStorage ), ( pxQueueBuffer ), ( queueQUEUE_TYPE_BASE ) )
++#endif /* configSUPPORT_STATIC_ALLOCATION */
++
++/**
++ * queue. h
++ * @code{c}
++ * BaseType_t xQueueSendToFront(
++ *                                 QueueHandle_t    xQueue,
++ *                                 const void       *pvItemToQueue,
++ *                                 TickType_t       xTicksToWait
++ *                             );
++ * @endcode
++ *
++ * Post an item to the front of a queue.  The item is queued by copy, not by
++ * reference.  This function must not be called from an interrupt service
++ * routine.  See xQueueSendFromISR () for an alternative which may be used
++ * in an ISR.
++ *
++ * @param xQueue The handle to the queue on which the item is to be posted.
++ *
++ * @param pvItemToQueue A pointer to the item that is to be placed on the
++ * queue.  The size of the items the queue will hold was defined when the
++ * queue was created, so this many bytes will be copied from pvItemToQueue
++ * into the queue storage area.
++ *
++ * @param xTicksToWait The maximum amount of time the task should block
++ * waiting for space to become available on the queue, should it already
++ * be full.  The call will return immediately if this is set to 0 and the
++ * queue is full.  The time is defined in tick periods so the constant
++ * portTICK_PERIOD_MS should be used to convert to real time if this is required.
++ *
++ * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL.
++ *
++ * Example usage:
++ * @code{c}
++ * struct AMessage
++ * {
++ *  char ucMessageID;
++ *  char ucData[ 20 ];
++ * } xMessage;
++ *
++ * uint32_t ulVar = 10UL;
++ *
++ * void vATask( void *pvParameters )
++ * {
++ * QueueHandle_t xQueue1, xQueue2;
++ * struct AMessage *pxMessage;
++ *
++ *  // Create a queue capable of containing 10 uint32_t values.
++ *  xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
++ *
++ *  // Create a queue capable of containing 10 pointers to AMessage structures.
++ *  // These should be passed by pointer as they contain a lot of data.
++ *  xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
++ *
++ *  // ...
++ *
++ *  if( xQueue1 != 0 )
++ *  {
++ *      // Send a uint32_t.  Wait for 10 ticks for space to become
++ *      // available if necessary.
++ *      if( xQueueSendToFront( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS )
++ *      {
++ *          // Failed to post the message, even after 10 ticks.
++ *      }
++ *  }
++ *
++ *  if( xQueue2 != 0 )
++ *  {
++ *      // Send a pointer to a struct AMessage object.  Don't block if the
++ *      // queue is already full.
++ *      pxMessage = & xMessage;
++ *      xQueueSendToFront( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
++ *  }
++ *
++ *  // ... Rest of task code.
++ * }
++ * @endcode
++ * \defgroup xQueueSend xQueueSend
++ * \ingroup QueueManagement
++ */
++#define xQueueSendToFront( xQueue, pvItemToQueue, xTicksToWait ) \
++    xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_FRONT )
++
++/**
++ * queue. h
++ * @code{c}
++ * BaseType_t xQueueSendToBack(
++ *                                 QueueHandle_t    xQueue,
++ *                                 const void       *pvItemToQueue,
++ *                                 TickType_t       xTicksToWait
++ *                             );
++ * @endcode
++ *
++ * This is a macro that calls xQueueGenericSend().
++ *
++ * Post an item to the back of a queue.  The item is queued by copy, not by
++ * reference.  This function must not be called from an interrupt service
++ * routine.  See xQueueSendFromISR () for an alternative which may be used
++ * in an ISR.
++ *
++ * @param xQueue The handle to the queue on which the item is to be posted.
++ *
++ * @param pvItemToQueue A pointer to the item that is to be placed on the
++ * queue.  The size of the items the queue will hold was defined when the
++ * queue was created, so this many bytes will be copied from pvItemToQueue
++ * into the queue storage area.
++ *
++ * @param xTicksToWait The maximum amount of time the task should block
++ * waiting for space to become available on the queue, should it already
++ * be full.  The call will return immediately if this is set to 0 and the queue
++ * is full.  The time is defined in tick periods so the constant
++ * portTICK_PERIOD_MS should be used to convert to real time if this is required.
++ *
++ * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL.
++ *
++ * Example usage:
++ * @code{c}
++ * struct AMessage
++ * {
++ *  char ucMessageID;
++ *  char ucData[ 20 ];
++ * } xMessage;
++ *
++ * uint32_t ulVar = 10UL;
++ *
++ * void vATask( void *pvParameters )
++ * {
++ * QueueHandle_t xQueue1, xQueue2;
++ * struct AMessage *pxMessage;
++ *
++ *  // Create a queue capable of containing 10 uint32_t values.
++ *  xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
++ *
++ *  // Create a queue capable of containing 10 pointers to AMessage structures.
++ *  // These should be passed by pointer as they contain a lot of data.
++ *  xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
++ *
++ *  // ...
++ *
++ *  if( xQueue1 != 0 )
++ *  {
++ *      // Send a uint32_t.  Wait for 10 ticks for space to become
++ *      // available if necessary.
++ *      if( xQueueSendToBack( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS )
++ *      {
++ *          // Failed to post the message, even after 10 ticks.
++ *      }
++ *  }
++ *
++ *  if( xQueue2 != 0 )
++ *  {
++ *      // Send a pointer to a struct AMessage object.  Don't block if the
++ *      // queue is already full.
++ *      pxMessage = & xMessage;
++ *      xQueueSendToBack( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
++ *  }
++ *
++ *  // ... Rest of task code.
++ * }
++ * @endcode
++ * \defgroup xQueueSend xQueueSend
++ * \ingroup QueueManagement
++ */
++#define xQueueSendToBack( xQueue, pvItemToQueue, xTicksToWait ) \
++    xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK )
++
++/**
++ * queue. h
++ * @code{c}
++ * BaseType_t xQueueSend(
++ *                            QueueHandle_t xQueue,
++ *                            const void * pvItemToQueue,
++ *                            TickType_t xTicksToWait
++ *                       );
++ * @endcode
++ *
++ * This is a macro that calls xQueueGenericSend().  It is included for
++ * backward compatibility with versions of FreeRTOS.org that did not
++ * include the xQueueSendToFront() and xQueueSendToBack() macros.  It is
++ * equivalent to xQueueSendToBack().
++ *
++ * Post an item on a queue.  The item is queued by copy, not by reference.
++ * This function must not be called from an interrupt service routine.
++ * See xQueueSendFromISR () for an alternative which may be used in an ISR.
++ *
++ * @param xQueue The handle to the queue on which the item is to be posted.
++ *
++ * @param pvItemToQueue A pointer to the item that is to be placed on the
++ * queue.  The size of the items the queue will hold was defined when the
++ * queue was created, so this many bytes will be copied from pvItemToQueue
++ * into the queue storage area.
++ *
++ * @param xTicksToWait The maximum amount of time the task should block
++ * waiting for space to become available on the queue, should it already
++ * be full.  The call will return immediately if this is set to 0 and the
++ * queue is full.  The time is defined in tick periods so the constant
++ * portTICK_PERIOD_MS should be used to convert to real time if this is required.
++ *
++ * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL.
++ *
++ * Example usage:
++ * @code{c}
++ * struct AMessage
++ * {
++ *  char ucMessageID;
++ *  char ucData[ 20 ];
++ * } xMessage;
++ *
++ * uint32_t ulVar = 10UL;
++ *
++ * void vATask( void *pvParameters )
++ * {
++ * QueueHandle_t xQueue1, xQueue2;
++ * struct AMessage *pxMessage;
++ *
++ *  // Create a queue capable of containing 10 uint32_t values.
++ *  xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
++ *
++ *  // Create a queue capable of containing 10 pointers to AMessage structures.
++ *  // These should be passed by pointer as they contain a lot of data.
++ *  xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
++ *
++ *  // ...
++ *
++ *  if( xQueue1 != 0 )
++ *  {
++ *      // Send a uint32_t.  Wait for 10 ticks for space to become
++ *      // available if necessary.
++ *      if( xQueueSend( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS )
++ *      {
++ *          // Failed to post the message, even after 10 ticks.
++ *      }
++ *  }
++ *
++ *  if( xQueue2 != 0 )
++ *  {
++ *      // Send a pointer to a struct AMessage object.  Don't block if the
++ *      // queue is already full.
++ *      pxMessage = & xMessage;
++ *      xQueueSend( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
++ *  }
++ *
++ *  // ... Rest of task code.
++ * }
++ * @endcode
++ * \defgroup xQueueSend xQueueSend
++ * \ingroup QueueManagement
++ */
++#define xQueueSend( xQueue, pvItemToQueue, xTicksToWait ) \
++    xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK )
++
++/**
++ * queue. h
++ * @code{c}
++ * BaseType_t xQueueGenericSend(
++ *                                  QueueHandle_t xQueue,
++ *                                  const void * pvItemToQueue,
++ *                                  TickType_t xTicksToWait,
++ *                                  BaseType_t xCopyPosition
++ *                              );
++ * @endcode
++ *
++ * It is preferred that the macros xQueueSend(), xQueueSendToFront() and
++ * xQueueSendToBack() are used in place of calling this function directly.
++ *
++ * Post an item on a queue.  The item is queued by copy, not by reference.
++ * This function must not be called from an interrupt service routine.
++ * See xQueueSendFromISR () for an alternative which may be used in an ISR.
++ *
++ * @param xQueue The handle to the queue on which the item is to be posted.
++ *
++ * @param pvItemToQueue A pointer to the item that is to be placed on the
++ * queue.  The size of the items the queue will hold was defined when the
++ * queue was created, so this many bytes will be copied from pvItemToQueue
++ * into the queue storage area.
++ *
++ * @param xTicksToWait The maximum amount of time the task should block
++ * waiting for space to become available on the queue, should it already
++ * be full.  The call will return immediately if this is set to 0 and the
++ * queue is full.  The time is defined in tick periods so the constant
++ * portTICK_PERIOD_MS should be used to convert to real time if this is required.
++ *
++ * @param xCopyPosition Can take the value queueSEND_TO_BACK to place the
++ * item at the back of the queue, or queueSEND_TO_FRONT to place the item
++ * at the front of the queue (for high priority messages).
++ *
++ * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL.
++ *
++ * Example usage:
++ * @code{c}
++ * struct AMessage
++ * {
++ *  char ucMessageID;
++ *  char ucData[ 20 ];
++ * } xMessage;
++ *
++ * uint32_t ulVar = 10UL;
++ *
++ * void vATask( void *pvParameters )
++ * {
++ * QueueHandle_t xQueue1, xQueue2;
++ * struct AMessage *pxMessage;
++ *
++ *  // Create a queue capable of containing 10 uint32_t values.
++ *  xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
++ *
++ *  // Create a queue capable of containing 10 pointers to AMessage structures.
++ *  // These should be passed by pointer as they contain a lot of data.
++ *  xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
++ *
++ *  // ...
++ *
++ *  if( xQueue1 != 0 )
++ *  {
++ *      // Send a uint32_t.  Wait for 10 ticks for space to become
++ *      // available if necessary.
++ *      if( xQueueGenericSend( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10, queueSEND_TO_BACK ) != pdPASS )
++ *      {
++ *          // Failed to post the message, even after 10 ticks.
++ *      }
++ *  }
++ *
++ *  if( xQueue2 != 0 )
++ *  {
++ *      // Send a pointer to a struct AMessage object.  Don't block if the
++ *      // queue is already full.
++ *      pxMessage = & xMessage;
++ *      xQueueGenericSend( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0, queueSEND_TO_BACK );
++ *  }
++ *
++ *  // ... Rest of task code.
++ * }
++ * @endcode
++ * \defgroup xQueueSend xQueueSend
++ * \ingroup QueueManagement
++ */
++BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
++                              const void * const pvItemToQueue,
++                              TickType_t xTicksToWait,
++                              const BaseType_t xCopyPosition );
++
++/**
++ * queue. h
++ * @code{c}
++ * BaseType_t xQueueReceive(
++ *                               QueueHandle_t xQueue,
++ *                               void *pvBuffer,
++ *                               TickType_t xTicksToWait
++ *                          );
++ * @endcode
++ *
++ * Receive an item from a queue.  The item is received by copy so a buffer of
++ * adequate size must be provided.  The number of bytes copied into the buffer
++ * was defined when the queue was created.
++ *
++ * Successfully received items are removed from the queue.
++ *
++ * This function must not be used in an interrupt service routine.  See
++ * xQueueReceiveFromISR for an alternative that can.
++ *
++ * @param xQueue The handle to the queue from which the item is to be
++ * received.
++ *
++ * @param pvBuffer Pointer to the buffer into which the received item will
++ * be copied.
++ *
++ * @param xTicksToWait The maximum amount of time the task should block
++ * waiting for an item to receive should the queue be empty at the time
++ * of the call. xQueueReceive() will return immediately if xTicksToWait
++ * is zero and the queue is empty.  The time is defined in tick periods so the
++ * constant portTICK_PERIOD_MS should be used to convert to real time if this is
++ * required.
++ *
++ * @return pdTRUE if an item was successfully received from the queue,
++ * otherwise pdFALSE.
++ *
++ * Example usage:
++ * @code{c}
++ * struct AMessage
++ * {
++ *  char ucMessageID;
++ *  char ucData[ 20 ];
++ * } xMessage;
++ *
++ * QueueHandle_t xQueue;
++ *
++ * // Task to create a queue and post a value.
++ * void vATask( void *pvParameters )
++ * {
++ * struct AMessage *pxMessage;
++ *
++ *  // Create a queue capable of containing 10 pointers to AMessage structures.
++ *  // These should be passed by pointer as they contain a lot of data.
++ *  xQueue = xQueueCreate( 10, sizeof( struct AMessage * ) );
++ *  if( xQueue == 0 )
++ *  {
++ *      // Failed to create the queue.
++ *  }
++ *
++ *  // ...
++ *
++ *  // Send a pointer to a struct AMessage object.  Don't block if the
++ *  // queue is already full.
++ *  pxMessage = & xMessage;
++ *  xQueueSend( xQueue, ( void * ) &pxMessage, ( TickType_t ) 0 );
++ *
++ *  // ... Rest of task code.
++ * }
++ *
++ * // Task to receive from the queue.
++ * void vADifferentTask( void *pvParameters )
++ * {
++ * struct AMessage *pxRxedMessage;
++ *
++ *  if( xQueue != 0 )
++ *  {
++ *      // Receive a message on the created queue.  Block for 10 ticks if a
++ *      // message is not immediately available.
++ *      if( xQueueReceive( xQueue, &( pxRxedMessage ), ( TickType_t ) 10 ) )
++ *      {
++ *          // pxRxedMessage now points to the struct AMessage variable posted
++ *          // by vATask.
++ *      }
++ *  }
++ *
++ *  // ... Rest of task code.
++ * }
++ * @endcode
++ * \defgroup xQueueReceive xQueueReceive
++ * \ingroup QueueManagement
++ */
++BaseType_t xQueueReceive( QueueHandle_t xQueue,
++                          void * const pvBuffer,
++                          TickType_t xTicksToWait );
++
++/**
++ * queue. h
++ * @code{c}
++ * UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue );
++ * @endcode
++ *
++ * Return the number of messages stored in a queue.
++ *
++ * @param xQueue A handle to the queue being queried.
++ *
++ * @return The number of messages available in the queue.
++ *
++ * \defgroup uxQueueMessagesWaiting uxQueueMessagesWaiting
++ * \ingroup QueueManagement
++ */
++UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue );
++
++/**
++ * queue. h
++ * @code{c}
++ * UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue );
++ * @endcode
++ *
++ * Return the number of free spaces available in a queue.  This is equal to the
++ * number of items that can be sent to the queue before the queue becomes full
++ * if no items are removed.
++ *
++ * @param xQueue A handle to the queue being queried.
++ *
++ * @return The number of spaces available in the queue.
++ *
++ * \defgroup uxQueueSpacesAvailable uxQueueSpacesAvailable
++ * \ingroup QueueManagement
++ */
++UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue );
++
++/**
++ * queue. h
++ * @code{c}
++ * void vQueueDelete( QueueHandle_t xQueue );
++ * @endcode
++ *
++ * Delete a queue - freeing all the memory allocated for storing of items
++ * placed on the queue.
++ *
++ * @param xQueue A handle to the queue to be deleted.
++ *
++ * \defgroup vQueueDelete vQueueDelete
++ * \ingroup QueueManagement
++ */
++void vQueueDelete( QueueHandle_t xQueue );
++
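++/* Usage sketch (illustrative only, not part of the upstream header): querying a
++ * queue's occupancy before deleting it.  The queue length and item type are made
++ * up for this example.
++ *
++ * @code{c}
++ * QueueHandle_t xQueue = xQueueCreate( 8, sizeof( uint32_t ) );
++ *
++ * if( xQueue != NULL )
++ * {
++ *     UBaseType_t uxUsed = uxQueueMessagesWaiting( xQueue );    // Items currently stored.
++ *     UBaseType_t uxFree = uxQueueSpacesAvailable( xQueue );    // Slots still available.
++ *
++ *     // uxUsed + uxFree always equals the queue length (8 here).
++ *     ( void ) uxUsed;
++ *     ( void ) uxFree;
++ *
++ *     vQueueDelete( xQueue );   // Frees the memory allocated for the queue.
++ * }
++ * @endcode
++ */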
++/**
++ * queue. h
++ * @code{c}
++ * BaseType_t xQueueSendToFrontFromISR(
++ *                                       QueueHandle_t xQueue,
++ *                                       const void *pvItemToQueue,
++ *                                       BaseType_t *pxHigherPriorityTaskWoken
++ *                                    );
++ * @endcode
++ *
++ * This is a macro that calls xQueueGenericSendFromISR().
++ *
++ * Post an item to the front of a queue.  It is safe to use this macro from
++ * within an interrupt service routine.
++ *
++ * Items are queued by copy not reference so it is preferable to only
++ * queue small items, especially when called from an ISR.  In most cases
++ * it would be preferable to store a pointer to the item being queued.
++ *
++ * @param xQueue The handle to the queue on which the item is to be posted.
++ *
++ * @param pvItemToQueue A pointer to the item that is to be placed on the
++ * queue.  The size of the items the queue will hold was defined when the
++ * queue was created, so this many bytes will be copied from pvItemToQueue
++ * into the queue storage area.
++ *
++ * @param pxHigherPriorityTaskWoken xQueueSendToFrontFromISR() will set
++ * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
++ * to unblock, and the unblocked task has a priority higher than the currently
++ * running task.  If xQueueSendToFrontFromISR() sets this value to pdTRUE then
++ * a context switch should be requested before the interrupt is exited.
++ *
++ * @return pdTRUE if the data was successfully sent to the queue, otherwise
++ * errQUEUE_FULL.
++ *
++ * Example usage for buffered IO (where the ISR can obtain more than one value
++ * per call):
++ * @code{c}
++ * void vBufferISR( void )
++ * {
++ * char cIn;
++ * BaseType_t xHigherPriorityTaskWoken;
++ *
++ *  // We have not woken a task at the start of the ISR.
++ *  xHigherPriorityTaskWoken = pdFALSE;
++ *
++ *  // Loop until the buffer is empty.
++ *  do
++ *  {
++ *      // Obtain a byte from the buffer.
++ *      cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
++ *
++ *      // Post the byte.
++ *      xQueueSendToFrontFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
++ *
++ *  } while( portINPUT_BYTE( BUFFER_COUNT ) );
++ *
++ *  // Now the buffer is empty we can switch context if necessary.
++ *  if( xHigherPriorityTaskWoken )
++ *  {
++ *      taskYIELD ();
++ *  }
++ * }
++ * @endcode
++ *
++ * \defgroup xQueueSendFromISR xQueueSendFromISR
++ * \ingroup QueueManagement
++ */
++#define xQueueSendToFrontFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) \
++    xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_FRONT )
++
++
++/**
++ * queue. h
++ * @code{c}
++ * BaseType_t xQueueSendToBackFromISR(
++ *                                       QueueHandle_t xQueue,
++ *                                       const void *pvItemToQueue,
++ *                                       BaseType_t *pxHigherPriorityTaskWoken
++ *                                    );
++ * @endcode
++ *
++ * This is a macro that calls xQueueGenericSendFromISR().
++ *
++ * Post an item to the back of a queue.  It is safe to use this macro from
++ * within an interrupt service routine.
++ *
++ * Items are queued by copy not reference so it is preferable to only
++ * queue small items, especially when called from an ISR.  In most cases
++ * it would be preferable to store a pointer to the item being queued.
++ *
++ * @param xQueue The handle to the queue on which the item is to be posted.
++ *
++ * @param pvItemToQueue A pointer to the item that is to be placed on the
++ * queue.  The size of the items the queue will hold was defined when the
++ * queue was created, so this many bytes will be copied from pvItemToQueue
++ * into the queue storage area.
++ *
++ * @param pxHigherPriorityTaskWoken xQueueSendToBackFromISR() will set
++ * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
++ * to unblock, and the unblocked task has a priority higher than the currently
++ * running task.  If xQueueSendToBackFromISR() sets this value to pdTRUE then
++ * a context switch should be requested before the interrupt is exited.
++ *
++ * @return pdTRUE if the data was successfully sent to the queue, otherwise
++ * errQUEUE_FULL.
++ *
++ * Example usage for buffered IO (where the ISR can obtain more than one value
++ * per call):
++ * @code{c}
++ * void vBufferISR( void )
++ * {
++ * char cIn;
++ * BaseType_t xHigherPriorityTaskWoken;
++ *
++ *  // We have not woken a task at the start of the ISR.
++ *  xHigherPriorityTaskWoken = pdFALSE;
++ *
++ *  // Loop until the buffer is empty.
++ *  do
++ *  {
++ *      // Obtain a byte from the buffer.
++ *      cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
++ *
++ *      // Post the byte.
++ *      xQueueSendToBackFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
++ *
++ *  } while( portINPUT_BYTE( BUFFER_COUNT ) );
++ *
++ *  // Now the buffer is empty we can switch context if necessary.
++ *  if( xHigherPriorityTaskWoken )
++ *  {
++ *      taskYIELD ();
++ *  }
++ * }
++ * @endcode
++ *
++ * \defgroup xQueueSendFromISR xQueueSendFromISR
++ * \ingroup QueueManagement
++ */
++#define xQueueSendToBackFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) \
++    xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK )
++
++/**
++ * queue. h
++ * @code{c}
++ * BaseType_t xQueueSendFromISR(
++ *                                   QueueHandle_t xQueue,
++ *                                   const void *pvItemToQueue,
++ *                                   BaseType_t *pxHigherPriorityTaskWoken
++ *                              );
++ * @endcode
++ *
++ * This is a macro that calls xQueueGenericSendFromISR().  It is included
++ * for backward compatibility with versions of FreeRTOS.org that did not
++ * include the xQueueSendToBackFromISR() and xQueueSendToFrontFromISR()
++ * macros.
++ *
++ * Post an item to the back of a queue.  It is safe to use this function from
++ * within an interrupt service routine.
++ *
++ * Items are queued by copy not reference so it is preferable to only
++ * queue small items, especially when called from an ISR.  In most cases
++ * it would be preferable to store a pointer to the item being queued.
++ *
++ * @param xQueue The handle to the queue on which the item is to be posted.
++ *
++ * @param pvItemToQueue A pointer to the item that is to be placed on the
++ * queue.  The size of the items the queue will hold was defined when the
++ * queue was created, so this many bytes will be copied from pvItemToQueue
++ * into the queue storage area.
++ *
++ * @param pxHigherPriorityTaskWoken xQueueSendFromISR() will set
++ * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
++ * to unblock, and the unblocked task has a priority higher than the currently
++ * running task.  If xQueueSendFromISR() sets this value to pdTRUE then
++ * a context switch should be requested before the interrupt is exited.
++ *
++ * @return pdTRUE if the data was successfully sent to the queue, otherwise
++ * errQUEUE_FULL.
++ *
++ * Example usage for buffered IO (where the ISR can obtain more than one value
++ * per call):
++ * @code{c}
++ * void vBufferISR( void )
++ * {
++ * char cIn;
++ * BaseType_t xHigherPriorityTaskWoken;
++ *
++ *  // We have not woken a task at the start of the ISR.
++ *  xHigherPriorityTaskWoken = pdFALSE;
++ *
++ *  // Loop until the buffer is empty.
++ *  do
++ *  {
++ *      // Obtain a byte from the buffer.
++ *      cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
++ *
++ *      // Post the byte.
++ *      xQueueSendFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
++ *
++ *  } while( portINPUT_BYTE( BUFFER_COUNT ) );
++ *
++ *  // Now the buffer is empty we can switch context if necessary.
++ *  if( xHigherPriorityTaskWoken )
++ *  {
++ *      // Actual macro used here is port specific.
++ *      portYIELD_FROM_ISR ();
++ *  }
++ * }
++ * @endcode
++ *
++ * \defgroup xQueueSendFromISR xQueueSendFromISR
++ * \ingroup QueueManagement
++ */
++#define xQueueSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) \
++    xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK )
++
++/**
++ * queue. h
++ * @code{c}
++ * BaseType_t xQueueGenericSendFromISR(
++ *                                         QueueHandle_t    xQueue,
++ *                                         const    void    *pvItemToQueue,
++ *                                         BaseType_t  *pxHigherPriorityTaskWoken,
++ *                                         BaseType_t  xCopyPosition
++ *                                     );
++ * @endcode
++ *
++ * It is preferred that the macros xQueueSendFromISR(),
++ * xQueueSendToFrontFromISR() and xQueueSendToBackFromISR() be used in place
++ * of calling this function directly.  xQueueGiveFromISR() is an
++ * equivalent for use by semaphores that don't actually copy any data.
++ *
++ * Post an item on a queue.  It is safe to use this function from within an
++ * interrupt service routine.
++ *
++ * Items are queued by copy not reference so it is preferable to only
++ * queue small items, especially when called from an ISR.  In most cases
++ * it would be preferable to store a pointer to the item being queued.
++ *
++ * @param xQueue The handle to the queue on which the item is to be posted.
++ *
++ * @param pvItemToQueue A pointer to the item that is to be placed on the
++ * queue.  The size of the items the queue will hold was defined when the
++ * queue was created, so this many bytes will be copied from pvItemToQueue
++ * into the queue storage area.
++ *
++ * @param pxHigherPriorityTaskWoken xQueueGenericSendFromISR() will set
++ * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
++ * to unblock, and the unblocked task has a priority higher than the currently
++ * running task.  If xQueueGenericSendFromISR() sets this value to pdTRUE then
++ * a context switch should be requested before the interrupt is exited.
++ *
++ * @param xCopyPosition Can take the value queueSEND_TO_BACK to place the
++ * item at the back of the queue, or queueSEND_TO_FRONT to place the item
++ * at the front of the queue (for high priority messages).
++ *
++ * @return pdTRUE if the data was successfully sent to the queue, otherwise
++ * errQUEUE_FULL.
++ *
++ * Example usage for buffered IO (where the ISR can obtain more than one value
++ * per call):
++ * @code{c}
++ * void vBufferISR( void )
++ * {
++ * char cIn;
++ * BaseType_t xHigherPriorityTaskWokenByPost;
++ *
++ *  // We have not woken a task at the start of the ISR.
++ *  xHigherPriorityTaskWokenByPost = pdFALSE;
++ *
++ *  // Loop until the buffer is empty.
++ *  do
++ *  {
++ *      // Obtain a byte from the buffer.
++ *      cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
++ *
++ *      // Post each byte.
++ *      xQueueGenericSendFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWokenByPost, queueSEND_TO_BACK );
++ *
++ *  } while( portINPUT_BYTE( BUFFER_COUNT ) );
++ *
++ *  // Now the buffer is empty we can switch context if necessary.  Note that the
++ *  // name of the yield function required is port specific.
++ *  if( xHigherPriorityTaskWokenByPost )
++ *  {
++ *      portYIELD_FROM_ISR();
++ *  }
++ * }
++ * @endcode
++ *
++ * \defgroup xQueueSendFromISR xQueueSendFromISR
++ * \ingroup QueueManagement
++ */
++BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
++                                     const void * const pvItemToQueue,
++                                     BaseType_t * const pxHigherPriorityTaskWoken,
++                                     const BaseType_t xCopyPosition );
++BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
++                              BaseType_t * const pxHigherPriorityTaskWoken );
++
++/**
++ * queue. h
++ * @code{c}
++ * BaseType_t xQueueReceiveFromISR(
++ *                                     QueueHandle_t    xQueue,
++ *                                     void             *pvBuffer,
++ *                                     BaseType_t       *pxTaskWoken
++ *                                 );
++ * @endcode
++ *
++ * Receive an item from a queue.  It is safe to use this function from within an
++ * interrupt service routine.
++ *
++ * @param xQueue The handle to the queue from which the item is to be
++ * received.
++ *
++ * @param pvBuffer Pointer to the buffer into which the received item will
++ * be copied.
++ *
++ * @param pxTaskWoken A task may be blocked waiting for space to become
++ * available on the queue.  If xQueueReceiveFromISR causes such a task to
++ * unblock *pxTaskWoken will get set to pdTRUE, otherwise *pxTaskWoken will
++ * remain unchanged.
++ *
++ * @return pdTRUE if an item was successfully received from the queue,
++ * otherwise pdFALSE.
++ *
++ * Example usage:
++ * @code{c}
++ *
++ * QueueHandle_t xQueue;
++ *
++ * // Function to create a queue and post some values.
++ * void vAFunction( void *pvParameters )
++ * {
++ * char cValueToPost;
++ * const TickType_t xTicksToWait = ( TickType_t )0xff;
++ *
++ *  // Create a queue capable of containing 10 characters.
++ *  xQueue = xQueueCreate( 10, sizeof( char ) );
++ *  if( xQueue == 0 )
++ *  {
++ *      // Failed to create the queue.
++ *  }
++ *
++ *  // ...
++ *
++ *  // Post some characters that will be used within an ISR.  If the queue
++ *  // is full then this task will block for xTicksToWait ticks.
++ *  cValueToPost = 'a';
++ *  xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
++ *  cValueToPost = 'b';
++ *  xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
++ *
++ *  // ... keep posting characters ... this task may block when the queue
++ *  // becomes full.
++ *
++ *  cValueToPost = 'c';
++ *  xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
++ * }
++ *
++ * // ISR that outputs all the characters received on the queue.
++ * void vISR_Routine( void )
++ * {
++ * BaseType_t xTaskWokenByReceive = pdFALSE;
++ * char cRxedChar;
++ *
++ *  while( xQueueReceiveFromISR( xQueue, ( void * ) &cRxedChar, &xTaskWokenByReceive) )
++ *  {
++ *      // A character was received.  Output the character now.
++ *      vOutputCharacter( cRxedChar );
++ *
++ *      // If removing the character from the queue woke the task that was
++ *      // posting onto the queue cTaskWokenByReceive will have been set to
++ *      // pdTRUE.  No matter how many times this loop iterates only one
++ *      // task will be woken.
++ *  }
++ *
++ *  if( xTaskWokenByReceive != pdFALSE )
++ *  {
++ *      taskYIELD ();
++ *  }
++ * }
++ * @endcode
++ * \defgroup xQueueReceiveFromISR xQueueReceiveFromISR
++ * \ingroup QueueManagement
++ */
++BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
++                                 void * const pvBuffer,
++                                 BaseType_t * const pxHigherPriorityTaskWoken );
++
++/*
++ * Utilities to query queues that are safe to use from an ISR.  These utilities
++ * should be used only from within an ISR, or within a critical section.
++ */
++BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue );
++BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue );
++UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue );
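++
++/*
++ * A minimal usage sketch for the ISR-safe query utilities above.  The queue
++ * handle xRxQueue used here is an assumed example and is not defined by this
++ * header:
++ *
++ * @code{c}
++ * void vRxISR( void )
++ * {
++ * UBaseType_t uxWaiting;
++ *
++ *  // How many items are already queued?  Safe to call from an ISR,
++ *  // e.g. to update a high-water mark statistic.
++ *  uxWaiting = uxQueueMessagesWaitingFromISR( xRxQueue );
++ *
++ *  if( xQueueIsQueueFullFromISR( xRxQueue ) != pdFALSE )
++ *  {
++ *      // The queue is full - record an overrun instead of posting.
++ *  }
++ * }
++ * @endcode
++ */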
++
++/*
++ * For internal use only.  Use xSemaphoreCreateMutex(),
++ * xSemaphoreCreateCounting() or xSemaphoreGetMutexHolder() instead of calling
++ * these functions directly.
++ */
++QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType );
++QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType,
++                                       StaticQueue_t * pxStaticQueue );
++QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
++                                             const UBaseType_t uxInitialCount );
++QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
++                                                   const UBaseType_t uxInitialCount,
++                                                   StaticQueue_t * pxStaticQueue );
++BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
++                                TickType_t xTicksToWait );
++TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore );
++TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore );
++
++/*
++ * For internal use only.  Use xSemaphoreTakeMutexRecursive() or
++ * xSemaphoreGiveMutexRecursive() instead of calling these functions directly.
++ */
++BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex,
++                                     TickType_t xTicksToWait );
++BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex );
++
++/*
++ * Reset a queue back to its original empty state.  The return value is now
++ * obsolete and is always set to pdPASS.
++ */
++#define xQueueReset( xQueue )    xQueueGenericReset( xQueue, pdFALSE )
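++
++/*
++ * A short illustrative sketch of resetting a queue; the handle xRxQueue is an
++ * assumed example created elsewhere with xQueueCreate():
++ *
++ * @code{c}
++ * // Discard any stale items before starting a new transfer.  The return
++ * // value is always pdPASS, so it need not be checked.
++ * xQueueReset( xRxQueue );
++ * @endcode
++ */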
++
++/*
++ * Generic version of the function used to create a queue using dynamic memory
++ * allocation.  This is called by other functions and macros that create other
++ * RTOS objects that use the queue structure as their base.
++ */
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++    QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
++                                       const UBaseType_t uxItemSize,
++                                       const uint8_t ucQueueType );
++#endif
++
++/*
++ * Generic version of the function used to create a queue using dynamic memory
++ * allocation.  This is called by other functions and macros that create other
++ * RTOS objects that use the queue structure as their base.
++ */
++#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++    QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
++                                             const UBaseType_t uxItemSize,
++                                             uint8_t * pucQueueStorage,
++                                             StaticQueue_t * pxStaticQueue,
++                                             const uint8_t ucQueueType );
++#endif
++
++/* Not public API functions. */
++BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
++                               BaseType_t xNewQueue );
++
++/* Unimplemented */
++typedef struct QueueDefinition   * QueueSetHandle_t;
++typedef struct QueueDefinition   * QueueSetMemberHandle_t;
++QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength );
++BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
++                           QueueSetHandle_t xQueueSet );
++BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
++                                QueueSetHandle_t xQueueSet );
++QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
++                                            const TickType_t xTicksToWait );
++QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet );
++BaseType_t xQueuePeek( QueueHandle_t xQueue,
++                       void * const pvBuffer,
++                       TickType_t xTicksToWait );
++BaseType_t xQueueOverwrite(QueueHandle_t xQueue, const void * pvItemToQueue);
++BaseType_t xQueueOverwriteFromISR(QueueHandle_t xQueue, const void * pvItemToQueue, BaseType_t *pxHigherPriorityTaskWoken);
++/* *INDENT-OFF* */
++#ifdef __cplusplus
++    }
++#endif
++/* *INDENT-ON* */
++
++#endif /* QUEUE_H */
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/semphr.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/semphr.h
+new file mode 100644
+index 0000000000..053dd177cf
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/semphr.h
+@@ -0,0 +1,1188 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++#ifndef SEMAPHORE_H
++#define SEMAPHORE_H
++
++#ifndef INC_FREERTOS_H
++    #error "include FreeRTOS.h" must appear in source files before "include semphr.h"
++#endif
++
++#include "queue.h"
++
++typedef QueueHandle_t SemaphoreHandle_t;
++
++#define semBINARY_SEMAPHORE_QUEUE_LENGTH    ( ( uint8_t ) 1U )
++#define semSEMAPHORE_QUEUE_ITEM_LENGTH      ( ( uint8_t ) 0U )
++#define semGIVE_BLOCK_TIME                  ( ( TickType_t ) 0U )
++
++
++/**
++ * semphr. h
++ * @code{c}
++ * vSemaphoreCreateBinary( SemaphoreHandle_t xSemaphore );
++ * @endcode
++ *
++ * In many usage scenarios it is faster and more memory efficient to use a
++ * direct to task notification in place of a binary semaphore!
++ * https://www.FreeRTOS.org/RTOS-task-notifications.html
++ *
++ * This old vSemaphoreCreateBinary() macro is now deprecated in favour of the
++ * xSemaphoreCreateBinary() function.  Note that binary semaphores created using
++ * the vSemaphoreCreateBinary() macro are created in a state such that the
++ * first call to 'take' the semaphore would pass, whereas binary semaphores
++ * created using xSemaphoreCreateBinary() are created in a state such that
++ * the semaphore must first be 'given' before it can be 'taken'.
++ *
++ * <i>Macro</i> that implements a semaphore by using the existing queue mechanism.
++ * The queue length is 1 as this is a binary semaphore.  The data size is 0
++ * as we don't want to actually store any data - we just want to know if the
++ * queue is empty or full.
++ *
++ * This type of semaphore can be used for pure synchronisation between tasks or
++ * between an interrupt and a task.  The semaphore need not be given back once
++ * obtained, so one task/interrupt can continuously 'give' the semaphore while
++ * another continuously 'takes' the semaphore.  For this reason this type of
++ * semaphore does not use a priority inheritance mechanism.  For an alternative
++ * that does use priority inheritance see xSemaphoreCreateMutex().
++ *
++ * @param xSemaphore Handle to the created semaphore.  Should be of type SemaphoreHandle_t.
++ *
++ * Example usage:
++ * @code{c}
++ * SemaphoreHandle_t xSemaphore = NULL;
++ *
++ * void vATask( void * pvParameters )
++ * {
++ *  // Semaphore cannot be used before a call to vSemaphoreCreateBinary ().
++ *  // This is a macro so pass the variable in directly.
++ *  vSemaphoreCreateBinary( xSemaphore );
++ *
++ *  if( xSemaphore != NULL )
++ *  {
++ *      // The semaphore was created successfully.
++ *      // The semaphore can now be used.
++ *  }
++ * }
++ * @endcode
++ * \defgroup vSemaphoreCreateBinary vSemaphoreCreateBinary
++ * \ingroup Semaphores
++ */
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++    #define vSemaphoreCreateBinary( xSemaphore )                                                                                     \
++    {                                                                                                                                \
++        ( xSemaphore ) = xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ); \
++        if( ( xSemaphore ) != NULL )                                                                                                 \
++        {                                                                                                                            \
++            ( void ) xSemaphoreGive( ( xSemaphore ) );                                                                               \
++        }                                                                                                                            \
++    }
++#endif
++
++/**
++ * semphr. h
++ * @code{c}
++ * SemaphoreHandle_t xSemaphoreCreateBinary( void );
++ * @endcode
++ *
++ * Creates a new binary semaphore instance, and returns a handle by which the
++ * new semaphore can be referenced.
++ *
++ * In many usage scenarios it is faster and more memory efficient to use a
++ * direct to task notification in place of a binary semaphore!
++ * https://www.FreeRTOS.org/RTOS-task-notifications.html
++ *
++ * Internally, within the FreeRTOS implementation, binary semaphores use a block
++ * of memory, in which the semaphore structure is stored.  If a binary semaphore
++ * is created using xSemaphoreCreateBinary() then the required memory is
++ * automatically dynamically allocated inside the xSemaphoreCreateBinary()
++ * function.  (see https://www.FreeRTOS.org/a00111.html).  If a binary semaphore
++ * is created using xSemaphoreCreateBinaryStatic() then the application writer
++ * must provide the memory.  xSemaphoreCreateBinaryStatic() therefore allows a
++ * binary semaphore to be created without using any dynamic memory allocation.
++ *
++ * The old vSemaphoreCreateBinary() macro is now deprecated in favour of this
++ * xSemaphoreCreateBinary() function.  Note that binary semaphores created using
++ * the vSemaphoreCreateBinary() macro are created in a state such that the
++ * first call to 'take' the semaphore would pass, whereas binary semaphores
++ * created using xSemaphoreCreateBinary() are created in a state such that
++ * the semaphore must first be 'given' before it can be 'taken'.
++ *
++ * This type of semaphore can be used for pure synchronisation between tasks or
++ * between an interrupt and a task.  The semaphore need not be given back once
++ * obtained, so one task/interrupt can continuously 'give' the semaphore while
++ * another continuously 'takes' the semaphore.  For this reason this type of
++ * semaphore does not use a priority inheritance mechanism.  For an alternative
++ * that does use priority inheritance see xSemaphoreCreateMutex().
++ *
++ * @return Handle to the created semaphore, or NULL if the memory required to
++ * hold the semaphore's data structures could not be allocated.
++ *
++ * Example usage:
++ * @code{c}
++ * SemaphoreHandle_t xSemaphore = NULL;
++ *
++ * void vATask( void * pvParameters )
++ * {
++ *  // Semaphore cannot be used before a call to xSemaphoreCreateBinary().
++ *  // The returned handle is stored in xSemaphore for later use.
++ *  xSemaphore = xSemaphoreCreateBinary();
++ *
++ *  if( xSemaphore != NULL )
++ *  {
++ *      // The semaphore was created successfully.
++ *      // The semaphore can now be used.
++ *  }
++ * }
++ * @endcode
++ * \defgroup xSemaphoreCreateBinary xSemaphoreCreateBinary
++ * \ingroup Semaphores
++ */
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++    #define xSemaphoreCreateBinary()    xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE )
++#endif
++
++/**
++ * semphr. h
++ * @code{c}
++ * SemaphoreHandle_t xSemaphoreCreateBinaryStatic( StaticSemaphore_t *pxSemaphoreBuffer );
++ * @endcode
++ *
++ * Creates a new binary semaphore instance, and returns a handle by which the
++ * new semaphore can be referenced.
++ *
++ * NOTE: In many usage scenarios it is faster and more memory efficient to use a
++ * direct to task notification in place of a binary semaphore!
++ * https://www.FreeRTOS.org/RTOS-task-notifications.html
++ *
++ * Internally, within the FreeRTOS implementation, binary semaphores use a block
++ * of memory, in which the semaphore structure is stored.  If a binary semaphore
++ * is created using xSemaphoreCreateBinary() then the required memory is
++ * automatically dynamically allocated inside the xSemaphoreCreateBinary()
++ * function.  (see https://www.FreeRTOS.org/a00111.html).  If a binary semaphore
++ * is created using xSemaphoreCreateBinaryStatic() then the application writer
++ * must provide the memory.  xSemaphoreCreateBinaryStatic() therefore allows a
++ * binary semaphore to be created without using any dynamic memory allocation.
++ *
++ * This type of semaphore can be used for pure synchronisation between tasks or
++ * between an interrupt and a task.  The semaphore need not be given back once
++ * obtained, so one task/interrupt can continuously 'give' the semaphore while
++ * another continuously 'takes' the semaphore.  For this reason this type of
++ * semaphore does not use a priority inheritance mechanism.  For an alternative
++ * that does use priority inheritance see xSemaphoreCreateMutex().
++ *
++ * @param pxSemaphoreBuffer Must point to a variable of type StaticSemaphore_t,
++ * which will then be used to hold the semaphore's data structure, removing the
++ * need for the memory to be allocated dynamically.
++ *
++ * @return If the semaphore is created then a handle to the created semaphore is
++ * returned.  If pxSemaphoreBuffer is NULL then NULL is returned.
++ *
++ * Example usage:
++ * @code{c}
++ * SemaphoreHandle_t xSemaphore = NULL;
++ * StaticSemaphore_t xSemaphoreBuffer;
++ *
++ * void vATask( void * pvParameters )
++ * {
++ *  // Semaphore cannot be used before a call to xSemaphoreCreateBinaryStatic().
++ *  // The semaphore's data structures will be placed in the xSemaphoreBuffer
++ *  // variable, the address of which is passed into the function.  The
++ *  // function's parameter is not NULL, so the function will not attempt any
++ *  // dynamic memory allocation, and therefore the function will not
++ *  // return NULL.
++ *  xSemaphore = xSemaphoreCreateBinaryStatic( &xSemaphoreBuffer );
++ *
++ *  // Rest of task code goes here.
++ * }
++ * @endcode
++ * \defgroup xSemaphoreCreateBinaryStatic xSemaphoreCreateBinaryStatic
++ * \ingroup Semaphores
++ */
++#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++    #define xSemaphoreCreateBinaryStatic( pxStaticSemaphore )    xQueueGenericCreateStatic( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, ( StaticQueue_t * ) pxStaticSemaphore, queueQUEUE_TYPE_BINARY_SEMAPHORE )
++#endif /* configSUPPORT_STATIC_ALLOCATION */
++
++/**
++ * semphr. h
++ * @code{c}
++ * xSemaphoreTake(
++ *                   SemaphoreHandle_t xSemaphore,
++ *                   TickType_t xBlockTime
++ *               );
++ * @endcode
++ *
++ * <i>Macro</i> to obtain a semaphore.  The semaphore must have previously been
++ * created with a call to xSemaphoreCreateBinary(), xSemaphoreCreateMutex() or
++ * xSemaphoreCreateCounting().
++ *
++ * @param xSemaphore A handle to the semaphore being taken - obtained when
++ * the semaphore was created.
++ *
++ * @param xBlockTime The time in ticks to wait for the semaphore to become
++ * available.  The macro portTICK_PERIOD_MS can be used to convert this to a
++ * real time.  A block time of zero can be used to poll the semaphore.  A block
++ * time of portMAX_DELAY can be used to block indefinitely (provided
++ * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h).
++ *
++ * @return pdTRUE if the semaphore was obtained.  pdFALSE
++ * if xBlockTime expired without the semaphore becoming available.
++ *
++ * Example usage:
++ * @code{c}
++ * SemaphoreHandle_t xSemaphore = NULL;
++ *
++ * // A task that creates a semaphore.
++ * void vATask( void * pvParameters )
++ * {
++ *  // Create the semaphore to guard a shared resource.
++ *  xSemaphore = xSemaphoreCreateBinary();
++ * }
++ *
++ * // A task that uses the semaphore.
++ * void vAnotherTask( void * pvParameters )
++ * {
++ *  // ... Do other things.
++ *
++ *  if( xSemaphore != NULL )
++ *  {
++ *      // See if we can obtain the semaphore.  If the semaphore is not available
++ *      // wait 10 ticks to see if it becomes free.
++ *      if( xSemaphoreTake( xSemaphore, ( TickType_t ) 10 ) == pdTRUE )
++ *      {
++ *          // We were able to obtain the semaphore and can now access the
++ *          // shared resource.
++ *
++ *          // ...
++ *
++ *          // We have finished accessing the shared resource.  Release the
++ *          // semaphore.
++ *          xSemaphoreGive( xSemaphore );
++ *      }
++ *      else
++ *      {
++ *          // We could not obtain the semaphore and can therefore not access
++ *          // the shared resource safely.
++ *      }
++ *  }
++ * }
++ * @endcode
++ * \defgroup xSemaphoreTake xSemaphoreTake
++ * \ingroup Semaphores
++ */
++#define xSemaphoreTake( xSemaphore, xBlockTime )    xQueueSemaphoreTake( ( xSemaphore ), ( xBlockTime ) )
++
++/**
++ * semphr. h
++ * @code{c}
++ * xSemaphoreTakeRecursive(
++ *                          SemaphoreHandle_t xMutex,
++ *                          TickType_t xBlockTime
++ *                        );
++ * @endcode
++ *
++ * <i>Macro</i> to recursively obtain, or 'take', a mutex type semaphore.
++ * The mutex must have previously been created using a call to
++ * xSemaphoreCreateRecursiveMutex();
++ *
++ * configUSE_RECURSIVE_MUTEXES must be set to 1 in FreeRTOSConfig.h for this
++ * macro to be available.
++ *
++ * This macro must not be used on mutexes created using xSemaphoreCreateMutex().
++ *
++ * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex
++ * doesn't become available again until the owner has called
++ * xSemaphoreGiveRecursive() for each successful 'take' request.  For example,
++ * if a task successfully 'takes' the same mutex 5 times then the mutex will
++ * not be available to any other task until it has also  'given' the mutex back
++ * exactly five times.
++ *
++ * @param xMutex A handle to the mutex being obtained.  This is the
++ * handle returned by xSemaphoreCreateRecursiveMutex();
++ *
++ * @param xBlockTime The time in ticks to wait for the semaphore to become
++ * available.  The macro portTICK_PERIOD_MS can be used to convert this to a
++ * real time.  A block time of zero can be used to poll the semaphore.  If
++ * the task already owns the semaphore then xSemaphoreTakeRecursive() will
++ * return immediately no matter what the value of xBlockTime.
++ *
++ * @return pdTRUE if the semaphore was obtained.  pdFALSE if xBlockTime
++ * expired without the semaphore becoming available.
++ *
++ * Example usage:
++ * @code{c}
++ * SemaphoreHandle_t xMutex = NULL;
++ *
++ * // A task that creates a mutex.
++ * void vATask( void * pvParameters )
++ * {
++ *  // Create the mutex to guard a shared resource.
++ *  xMutex = xSemaphoreCreateRecursiveMutex();
++ * }
++ *
++ * // A task that uses the mutex.
++ * void vAnotherTask( void * pvParameters )
++ * {
++ *  // ... Do other things.
++ *
++ *  if( xMutex != NULL )
++ *  {
++ *      // See if we can obtain the mutex.  If the mutex is not available
++ *      // wait 10 ticks to see if it becomes free.
++ *      if( xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ) == pdTRUE )
++ *      {
++ *          // We were able to obtain the mutex and can now access the
++ *          // shared resource.
++ *
++ *          // ...
++ *          // For some reason due to the nature of the code further calls to
++ *          // xSemaphoreTakeRecursive() are made on the same mutex.  In real
++ *          // code these would not be just sequential calls as this would make
++ *          // no sense.  Instead the calls are likely to be buried inside
++ *          // a more complex call structure.
++ *          xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 );
++ *          xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 );
++ *
++ *          // The mutex has now been 'taken' three times, so will not be
++ *          // available to another task until it has also been given back
++ *          // three times.  Again it is unlikely that real code would have
++ *          // these calls sequentially, but instead buried in a more complex
++ *          // call structure.  This is just for illustrative purposes.
++ *          xSemaphoreGiveRecursive( xMutex );
++ *          xSemaphoreGiveRecursive( xMutex );
++ *          xSemaphoreGiveRecursive( xMutex );
++ *
++ *          // Now the mutex can be taken by other tasks.
++ *      }
++ *      else
++ *      {
++ *          // We could not obtain the mutex and can therefore not access
++ *          // the shared resource safely.
++ *      }
++ *  }
++ * }
++ * @endcode
++ * \defgroup xSemaphoreTakeRecursive xSemaphoreTakeRecursive
++ * \ingroup Semaphores
++ */
++#if ( configUSE_RECURSIVE_MUTEXES == 1 )
++    #define xSemaphoreTakeRecursive( xMutex, xBlockTime )    xQueueTakeMutexRecursive( ( xMutex ), ( xBlockTime ) )
++#endif
++
++/**
++ * semphr. h
++ * @code{c}
++ * xSemaphoreGive( SemaphoreHandle_t xSemaphore );
++ * @endcode
++ *
++ * <i>Macro</i> to release a semaphore.  The semaphore must have previously been
++ * created with a call to xSemaphoreCreateBinary(), xSemaphoreCreateMutex() or
++ * xSemaphoreCreateCounting(), and obtained using xSemaphoreTake().
++ *
++ * This macro must not be used from an ISR.  See xSemaphoreGiveFromISR () for
++ * an alternative which can be used from an ISR.
++ *
++ * This macro must also not be used on semaphores created using
++ * xSemaphoreCreateRecursiveMutex().
++ *
++ * @param xSemaphore A handle to the semaphore being released.  This is the
++ * handle returned when the semaphore was created.
++ *
++ * @return pdTRUE if the semaphore was released.  pdFALSE if an error occurred.
++ * Semaphores are implemented using queues.  An error can occur if there is
++ * no space on the queue to post a message - indicating that the
++ * semaphore was not first obtained correctly.
++ *
++ * Example usage:
++ * @code{c}
++ * SemaphoreHandle_t xSemaphore = NULL;
++ *
++ * void vATask( void * pvParameters )
++ * {
++ *  // Create the semaphore to guard a shared resource.
++ *  vSemaphoreCreateBinary( xSemaphore );
++ *
++ *  if( xSemaphore != NULL )
++ *  {
++ *      if( xSemaphoreGive( xSemaphore ) != pdTRUE )
++ *      {
++ *          // We would expect this call to fail because we cannot give
++ *          // a semaphore without first "taking" it!
++ *      }
++ *
++ *      // Obtain the semaphore - don't block if the semaphore is not
++ *      // immediately available.
++ *      if( xSemaphoreTake( xSemaphore, ( TickType_t ) 0 ) )
++ *      {
++ *          // We now have the semaphore and can access the shared resource.
++ *
++ *          // ...
++ *
++ *          // We have finished accessing the shared resource so can free the
++ *          // semaphore.
++ *          if( xSemaphoreGive( xSemaphore ) != pdTRUE )
++ *          {
++ *              // We would not expect this call to fail because we must have
++ *              // obtained the semaphore to get here.
++ *          }
++ *      }
++ *  }
++ * }
++ * @endcode
++ * \defgroup xSemaphoreGive xSemaphoreGive
++ * \ingroup Semaphores
++ */
++#define xSemaphoreGive( xSemaphore )    xQueueGenericSend( ( QueueHandle_t ) ( xSemaphore ), NULL, semGIVE_BLOCK_TIME, queueSEND_TO_BACK )
++
++/**
++ * semphr. h
++ * @code{c}
++ * xSemaphoreGiveRecursive( SemaphoreHandle_t xMutex );
++ * @endcode
++ *
++ * <i>Macro</i> to recursively release, or 'give', a mutex type semaphore.
++ * The mutex must have previously been created using a call to
++ * xSemaphoreCreateRecursiveMutex();
++ *
++ * configUSE_RECURSIVE_MUTEXES must be set to 1 in FreeRTOSConfig.h for this
++ * macro to be available.
++ *
++ * This macro must not be used on mutexes created using xSemaphoreCreateMutex().
++ *
++ * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex
++ * doesn't become available again until the owner has called
++ * xSemaphoreGiveRecursive() for each successful 'take' request.  For example,
++ * if a task successfully 'takes' the same mutex 5 times then the mutex will
++ * not be available to any other task until it has also  'given' the mutex back
++ * exactly five times.
++ *
++ * @param xMutex A handle to the mutex being released, or 'given'.  This is the
++ * handle returned by xSemaphoreCreateMutex();
++ *
++ * @return pdTRUE if the semaphore was given.
++ *
++ * Example usage:
++ * @code{c}
++ * SemaphoreHandle_t xMutex = NULL;
++ *
++ * // A task that creates a mutex.
++ * void vATask( void * pvParameters )
++ * {
++ *  // Create the mutex to guard a shared resource.
++ *  xMutex = xSemaphoreCreateRecursiveMutex();
++ * }
++ *
++ * // A task that uses the mutex.
++ * void vAnotherTask( void * pvParameters )
++ * {
++ *  // ... Do other things.
++ *
++ *  if( xMutex != NULL )
++ *  {
++ *      // See if we can obtain the mutex.  If the mutex is not available
++ *      // wait 10 ticks to see if it becomes free.
++ *      if( xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ) == pdTRUE )
++ *      {
++ *          // We were able to obtain the mutex and can now access the
++ *          // shared resource.
++ *
++ *          // ...
++ *          // For some reason due to the nature of the code further calls to
++ *          // xSemaphoreTakeRecursive() are made on the same mutex.  In real
++ *          // code these would not be just sequential calls as this would make
++ *          // no sense.  Instead the calls are likely to be buried inside
++ *          // a more complex call structure.
++ *          xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 );
++ *          xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 );
++ *
++ *          // The mutex has now been 'taken' three times, so will not be
++ *          // available to another task until it has also been given back
++ *          // three times.  Again it is unlikely that real code would have
++ *          // these calls sequentially, it would be more likely that the calls
++ *          // to xSemaphoreGiveRecursive() would be called as a call stack
++ *          // unwound.  This is just for demonstrative purposes.
++ *          xSemaphoreGiveRecursive( xMutex );
++ *          xSemaphoreGiveRecursive( xMutex );
++ *          xSemaphoreGiveRecursive( xMutex );
++ *
++ *          // Now the mutex can be taken by other tasks.
++ *      }
++ *      else
++ *      {
++ *          // We could not obtain the mutex and can therefore not access
++ *          // the shared resource safely.
++ *      }
++ *  }
++ * }
++ * @endcode
++ * \defgroup xSemaphoreGiveRecursive xSemaphoreGiveRecursive
++ * \ingroup Semaphores
++ */
++#if ( configUSE_RECURSIVE_MUTEXES == 1 )
++    #define xSemaphoreGiveRecursive( xMutex )    xQueueGiveMutexRecursive( ( xMutex ) )
++#endif
++
++/**
++ * semphr. h
++ * @code{c}
++ * xSemaphoreGiveFromISR(
++ *                        SemaphoreHandle_t xSemaphore,
++ *                        BaseType_t *pxHigherPriorityTaskWoken
++ *                    );
++ * @endcode
++ *
++ * <i>Macro</i> to  release a semaphore.  The semaphore must have previously been
++ * created with a call to xSemaphoreCreateBinary() or xSemaphoreCreateCounting().
++ *
++ * Mutex type semaphores (those created using a call to xSemaphoreCreateMutex())
++ * must not be used with this macro.
++ *
++ * This macro can be used from an ISR.
++ *
++ * @param xSemaphore A handle to the semaphore being released.  This is the
++ * handle returned when the semaphore was created.
++ *
++ * @param pxHigherPriorityTaskWoken xSemaphoreGiveFromISR() will set
++ * *pxHigherPriorityTaskWoken to pdTRUE if giving the semaphore caused a task
++ * to unblock, and the unblocked task has a priority higher than the currently
++ * running task.  If xSemaphoreGiveFromISR() sets this value to pdTRUE then
++ * a context switch should be requested before the interrupt is exited.
++ *
++ * @return pdTRUE if the semaphore was successfully given, otherwise errQUEUE_FULL.
++ *
++ * Example usage:
++ * @code{c}
++ * \#define LONG_TIME 0xffff
++ * \#define TICKS_TO_WAIT 10
++ * SemaphoreHandle_t xSemaphore = NULL;
++ *
++ * // Repetitive task.
++ * void vATask( void * pvParameters )
++ * {
++ *  for( ;; )
++ *  {
++ *      // We want this task to run every 10 ticks of a timer.  The semaphore
++ *      // was created before this task was started.
++ *
++ *      // Block waiting for the semaphore to become available.
++ *      if( xSemaphoreTake( xSemaphore, LONG_TIME ) == pdTRUE )
++ *      {
++ *          // It is time to execute.
++ *
++ *          // ...
++ *
++ *          // We have finished our task.  Return to the top of the loop where
++ *          // we will block on the semaphore until it is time to execute
++ *          // again.  Note when using the semaphore for synchronisation with an
++ *          // ISR in this manner there is no need to 'give' the semaphore back.
++ *      }
++ *  }
++ * }
++ *
++ * // Timer ISR
++ * void vTimerISR( void * pvParameters )
++ * {
++ * static uint8_t ucLocalTickCount = 0;
++ * static BaseType_t xHigherPriorityTaskWoken;
++ *
++ *  // A timer tick has occurred.
++ *
++ *  // ... Do other time functions.
++ *
++ *  // Is it time for vATask () to run?
++ *  xHigherPriorityTaskWoken = pdFALSE;
++ *  ucLocalTickCount++;
++ *  if( ucLocalTickCount >= TICKS_TO_WAIT )
++ *  {
++ *      // Unblock the task by releasing the semaphore.
++ *      xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken );
++ *
++ *      // Reset the count so we release the semaphore again in 10 ticks time.
++ *      ucLocalTickCount = 0;
++ *  }
++ *
++ *  if( xHigherPriorityTaskWoken != pdFALSE )
++ *  {
++ *      // We can force a context switch here.  Context switching from an
++ *      // ISR uses port specific syntax.  Check the demo task for your port
++ *      // to find the syntax required.
++ *  }
++ * }
++ * @endcode
++ * \defgroup xSemaphoreGiveFromISR xSemaphoreGiveFromISR
++ * \ingroup Semaphores
++ */
++#define xSemaphoreGiveFromISR( xSemaphore, pxHigherPriorityTaskWoken )    xQueueGiveFromISR( ( QueueHandle_t ) ( xSemaphore ), ( pxHigherPriorityTaskWoken ) )
++
++/**
++ * semphr. h
++ * @code{c}
++ * xSemaphoreTakeFromISR(
++ *                        SemaphoreHandle_t xSemaphore,
++ *                        BaseType_t *pxHigherPriorityTaskWoken
++ *                    );
++ * @endcode
++ *
++ * <i>Macro</i> to  take a semaphore from an ISR.  The semaphore must have
++ * previously been created with a call to xSemaphoreCreateBinary() or
++ * xSemaphoreCreateCounting().
++ *
++ * Mutex type semaphores (those created using a call to xSemaphoreCreateMutex())
++ * must not be used with this macro.
++ *
++ * This macro can be used from an ISR, however taking a semaphore from an ISR
++ * is not a common operation.  It is likely to only be useful when taking a
++ * counting semaphore when an interrupt is obtaining an object from a resource
++ * pool (when the semaphore count indicates the number of resources available).
++ *
++ * @param xSemaphore A handle to the semaphore being taken.  This is the
++ * handle returned when the semaphore was created.
++ *
++ * @param pxHigherPriorityTaskWoken xSemaphoreTakeFromISR() will set
++ * *pxHigherPriorityTaskWoken to pdTRUE if taking the semaphore caused a task
++ * to unblock, and the unblocked task has a priority higher than the currently
++ * running task.  If xSemaphoreTakeFromISR() sets this value to pdTRUE then
++ * a context switch should be requested before the interrupt is exited.
++ *
++ * @return pdTRUE if the semaphore was successfully taken, otherwise
++ * pdFALSE
++ */
++#define xSemaphoreTakeFromISR( xSemaphore, pxHigherPriorityTaskWoken )    xQueueReceiveFromISR( ( QueueHandle_t ) ( xSemaphore ), NULL, ( pxHigherPriorityTaskWoken ) )
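++
++/*
++ * A hedged usage sketch for xSemaphoreTakeFromISR(), following the resource
++ * pool scenario described above.  The counting semaphore xPoolSemaphore is an
++ * assumed example created elsewhere with xSemaphoreCreateCounting():
++ *
++ * @code{c}
++ * void vResourceISR( void )
++ * {
++ * BaseType_t xHigherPriorityTaskWoken = pdFALSE;
++ *
++ *  // Try to claim one resource counted by the semaphore.
++ *  if( xSemaphoreTakeFromISR( xPoolSemaphore, &xHigherPriorityTaskWoken ) == pdTRUE )
++ *  {
++ *      // A resource was available - use it here.
++ *  }
++ *
++ *  if( xHigherPriorityTaskWoken != pdFALSE )
++ *  {
++ *      // Actual macro used here is port specific.
++ *      portYIELD_FROM_ISR ();
++ *  }
++ * }
++ * @endcode
++ */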
++
++/**
++ * semphr. h
++ * @code{c}
++ * SemaphoreHandle_t xSemaphoreCreateMutex( void );
++ * @endcode
++ *
++ * Creates a new mutex type semaphore instance, and returns a handle by which
++ * the new mutex can be referenced.
++ *
++ * Internally, within the FreeRTOS implementation, mutex semaphores use a block
++ * of memory, in which the mutex structure is stored.  If a mutex is created
++ * using xSemaphoreCreateMutex() then the required memory is automatically
++ * dynamically allocated inside the xSemaphoreCreateMutex() function.  (see
++ * https://www.FreeRTOS.org/a00111.html).  If a mutex is created using
++ * xSemaphoreCreateMutexStatic() then the application writer must provide the
++ * memory.  xSemaphoreCreateMutexStatic() therefore allows a mutex to be created
++ * without using any dynamic memory allocation.
++ *
++ * Mutexes created using this function can be accessed using the xSemaphoreTake()
++ * and xSemaphoreGive() macros.  The xSemaphoreTakeRecursive() and
++ * xSemaphoreGiveRecursive() macros must not be used.
++ *
++ * This type of semaphore uses a priority inheritance mechanism so a task
++ * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the
++ * semaphore is no longer required.
++ *
++ * Mutex type semaphores cannot be used from within interrupt service routines.
++ *
++ * See xSemaphoreCreateBinary() for an alternative implementation that can be
++ * used for pure synchronisation (where one task or interrupt always 'gives' the
++ * semaphore and another always 'takes' the semaphore) and from within interrupt
++ * service routines.
++ *
++ * @return If the mutex was successfully created then a handle to the created
++ * semaphore is returned.  If there was not enough heap to allocate the mutex
++ * data structures then NULL is returned.
++ *
++ * Example usage:
++ * @code{c}
++ * SemaphoreHandle_t xSemaphore;
++ *
++ * void vATask( void * pvParameters )
++ * {
++ *  // Semaphore cannot be used before a call to xSemaphoreCreateMutex().
++ *  // The returned handle is stored in xSemaphore for later use.
++ *  xSemaphore = xSemaphoreCreateMutex();
++ *
++ *  if( xSemaphore != NULL )
++ *  {
++ *      // The semaphore was created successfully.
++ *      // The semaphore can now be used.
++ *  }
++ * }
++ * @endcode
++ * \defgroup xSemaphoreCreateMutex xSemaphoreCreateMutex
++ * \ingroup Semaphores
++ */
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++    #define xSemaphoreCreateMutex()    xQueueCreateMutex( queueQUEUE_TYPE_MUTEX )
++#endif
++
++/**
++ * semphr. h
++ * @code{c}
++ * SemaphoreHandle_t xSemaphoreCreateMutexStatic( StaticSemaphore_t *pxMutexBuffer );
++ * @endcode
++ *
++ * Creates a new mutex type semaphore instance, and returns a handle by which
++ * the new mutex can be referenced.
++ *
++ * Internally, within the FreeRTOS implementation, mutex semaphores use a block
++ * of memory, in which the mutex structure is stored.  If a mutex is created
++ * using xSemaphoreCreateMutex() then the required memory is automatically
++ * dynamically allocated inside the xSemaphoreCreateMutex() function.  (see
++ * https://www.FreeRTOS.org/a00111.html).  If a mutex is created using
++ * xSemaphoreCreateMutexStatic() then the application writer must provide the
++ * memory.  xSemaphoreCreateMutexStatic() therefore allows a mutex to be created
++ * without using any dynamic memory allocation.
++ *
++ * Mutexes created using this function can be accessed using the xSemaphoreTake()
++ * and xSemaphoreGive() macros.  The xSemaphoreTakeRecursive() and
++ * xSemaphoreGiveRecursive() macros must not be used.
++ *
++ * This type of semaphore uses a priority inheritance mechanism so a task
++ * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the
++ * semaphore is no longer required.
++ *
++ * Mutex type semaphores cannot be used from within interrupt service routines.
++ *
++ * See xSemaphoreCreateBinary() for an alternative implementation that can be
++ * used for pure synchronisation (where one task or interrupt always 'gives' the
++ * semaphore and another always 'takes' the semaphore) and from within interrupt
++ * service routines.
++ *
++ * @param pxMutexBuffer Must point to a variable of type StaticSemaphore_t,
++ * which will be used to hold the mutex's data structure, removing the need for
++ * the memory to be allocated dynamically.
++ *
++ * @return If the mutex was successfully created then a handle to the created
++ * mutex is returned.  If pxMutexBuffer was NULL then NULL is returned.
++ *
++ * Example usage:
++ * @code{c}
++ * SemaphoreHandle_t xSemaphore;
++ * StaticSemaphore_t xMutexBuffer;
++ *
++ * void vATask( void * pvParameters )
++ * {
++ *  // A mutex cannot be used before it has been created.  xMutexBuffer is
++ *  // passed into xSemaphoreCreateMutexStatic() so no dynamic memory
++ *  // allocation is attempted.
++ *  xSemaphore = xSemaphoreCreateMutexStatic( &xMutexBuffer );
++ *
++ *  // As no dynamic memory allocation was performed, xSemaphore cannot be NULL,
++ *  // so there is no need to check it.
++ * }
++ * @endcode
++ * \defgroup xSemaphoreCreateMutexStatic xSemaphoreCreateMutexStatic
++ * \ingroup Semaphores
++ */
++#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++    #define xSemaphoreCreateMutexStatic( pxMutexBuffer )    xQueueCreateMutexStatic( queueQUEUE_TYPE_MUTEX, ( StaticQueue_t * ) ( pxMutexBuffer ) )
++#endif /* configSUPPORT_STATIC_ALLOCATION */
++
++/**
++ * semphr. h
++ * @code{c}
++ * SemaphoreHandle_t xSemaphoreCreateRecursiveMutex( void );
++ * @endcode
++ *
++ * Creates a new recursive mutex type semaphore instance, and returns a handle
++ * by which the new recursive mutex can be referenced.
++ *
++ * Internally, within the FreeRTOS implementation, recursive mutexes use a block
++ * of memory, in which the mutex structure is stored.  If a recursive mutex is
++ * created using xSemaphoreCreateRecursiveMutex() then the required memory is
++ * automatically dynamically allocated inside the
++ * xSemaphoreCreateRecursiveMutex() function.  (see
++ * https://www.FreeRTOS.org/a00111.html).  If a recursive mutex is created using
++ * xSemaphoreCreateRecursiveMutexStatic() then the application writer must
++ * provide the memory that will get used by the mutex.
++ * xSemaphoreCreateRecursiveMutexStatic() therefore allows a recursive mutex to
++ * be created without using any dynamic memory allocation.
++ *
++ * Mutexes created using this macro can be accessed using the
++ * xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() macros.  The
++ * xSemaphoreTake() and xSemaphoreGive() macros must not be used.
++ *
++ * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex
++ * doesn't become available again until the owner has called
++ * xSemaphoreGiveRecursive() for each successful 'take' request.  For example,
++ * if a task successfully 'takes' the same mutex 5 times then the mutex will
++ * not be available to any other task until it has also  'given' the mutex back
++ * exactly five times.
++ *
++ * This type of semaphore uses a priority inheritance mechanism so a task
++ * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the
++ * semaphore is no longer required.
++ *
++ * Mutex type semaphores cannot be used from within interrupt service routines.
++ *
++ * See xSemaphoreCreateBinary() for an alternative implementation that can be
++ * used for pure synchronisation (where one task or interrupt always 'gives' the
++ * semaphore and another always 'takes' the semaphore) and from within interrupt
++ * service routines.
++ *
++ * @return xSemaphore Handle to the created mutex semaphore.  Should be of type
++ * SemaphoreHandle_t.
++ *
++ * Example usage:
++ * @code{c}
++ * SemaphoreHandle_t xSemaphore;
++ *
++ * void vATask( void * pvParameters )
++ * {
++ *  // Semaphore cannot be used before a call to xSemaphoreCreateRecursiveMutex().
++ *  // The returned handle is stored in xSemaphore for later use.
++ *  xSemaphore = xSemaphoreCreateRecursiveMutex();
++ *
++ *  if( xSemaphore != NULL )
++ *  {
++ *      // The semaphore was created successfully.
++ *      // The semaphore can now be used.
++ *  }
++ * }
++ * @endcode
++ * \defgroup xSemaphoreCreateRecursiveMutex xSemaphoreCreateRecursiveMutex
++ * \ingroup Semaphores
++ */
++#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_RECURSIVE_MUTEXES == 1 ) )
++    #define xSemaphoreCreateRecursiveMutex()    xQueueCreateMutex( queueQUEUE_TYPE_RECURSIVE_MUTEX )
++#endif
++
++/**
++ * semphr. h
++ * @code{c}
++ * SemaphoreHandle_t xSemaphoreCreateRecursiveMutexStatic( StaticSemaphore_t *pxMutexBuffer );
++ * @endcode
++ *
++ * Creates a new recursive mutex type semaphore instance, and returns a handle
++ * by which the new recursive mutex can be referenced.
++ *
++ * Internally, within the FreeRTOS implementation, recursive mutexes use a block
++ * of memory, in which the mutex structure is stored.  If a recursive mutex is
++ * created using xSemaphoreCreateRecursiveMutex() then the required memory is
++ * automatically dynamically allocated inside the
++ * xSemaphoreCreateRecursiveMutex() function.  (see
++ * https://www.FreeRTOS.org/a00111.html).  If a recursive mutex is created using
++ * xSemaphoreCreateRecursiveMutexStatic() then the application writer must
++ * provide the memory that will get used by the mutex.
++ * xSemaphoreCreateRecursiveMutexStatic() therefore allows a recursive mutex to
++ * be created without using any dynamic memory allocation.
++ *
++ * Mutexes created using this macro can be accessed using the
++ * xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() macros.  The
++ * xSemaphoreTake() and xSemaphoreGive() macros must not be used.
++ *
++ * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex
++ * doesn't become available again until the owner has called
++ * xSemaphoreGiveRecursive() for each successful 'take' request.  For example,
++ * if a task successfully 'takes' the same mutex 5 times then the mutex will
++ * not be available to any other task until it has also  'given' the mutex back
++ * exactly five times.
++ *
++ * This type of semaphore uses a priority inheritance mechanism so a task
++ * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the
++ * semaphore is no longer required.
++ *
++ * Mutex type semaphores cannot be used from within interrupt service routines.
++ *
++ * See xSemaphoreCreateBinary() for an alternative implementation that can be
++ * used for pure synchronisation (where one task or interrupt always 'gives' the
++ * semaphore and another always 'takes' the semaphore) and from within interrupt
++ * service routines.
++ *
++ * @param pxMutexBuffer Must point to a variable of type StaticSemaphore_t,
++ * which will then be used to hold the recursive mutex's data structure,
++ * removing the need for the memory to be allocated dynamically.
++ *
++ * @return If the recursive mutex was successfully created then a handle to the
++ * created recursive mutex is returned.  If pxMutexBuffer was NULL then NULL is
++ * returned.
++ *
++ * Example usage:
++ * @code{c}
++ * SemaphoreHandle_t xSemaphore;
++ * StaticSemaphore_t xMutexBuffer;
++ *
++ * void vATask( void * pvParameters )
++ * {
++ *  // A recursive semaphore cannot be used before it is created.  Here a
++ *  // recursive mutex is created using xSemaphoreCreateRecursiveMutexStatic().
++ *  // The address of xMutexBuffer is passed into the function, and will hold
++ *  // the mutex's data structures - so no dynamic memory allocation will be
++ *  // attempted.
++ *  xSemaphore = xSemaphoreCreateRecursiveMutexStatic( &xMutexBuffer );
++ *
++ *  // As no dynamic memory allocation was performed, xSemaphore cannot be NULL,
++ *  // so there is no need to check it.
++ * }
++ * @endcode
++ * \defgroup xSemaphoreCreateRecursiveMutexStatic xSemaphoreCreateRecursiveMutexStatic
++ * \ingroup Semaphores
++ */
++#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_RECURSIVE_MUTEXES == 1 ) )
++    #define xSemaphoreCreateRecursiveMutexStatic( pxStaticSemaphore )    xQueueCreateMutexStatic( queueQUEUE_TYPE_RECURSIVE_MUTEX, ( StaticQueue_t * ) pxStaticSemaphore )
++#endif /* configSUPPORT_STATIC_ALLOCATION */
++
++/**
++ * semphr. h
++ * @code{c}
++ * SemaphoreHandle_t xSemaphoreCreateCounting( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount );
++ * @endcode
++ *
++ * Creates a new counting semaphore instance, and returns a handle by which the
++ * new counting semaphore can be referenced.
++ *
++ * In many usage scenarios it is faster and more memory efficient to use a
++ * direct to task notification in place of a counting semaphore!
++ * https://www.FreeRTOS.org/RTOS-task-notifications.html
++ *
++ * Internally, within the FreeRTOS implementation, counting semaphores use a
++ * block of memory, in which the counting semaphore structure is stored.  If a
++ * counting semaphore is created using xSemaphoreCreateCounting() then the
++ * required memory is automatically dynamically allocated inside the
++ * xSemaphoreCreateCounting() function.  (see
++ * https://www.FreeRTOS.org/a00111.html).  If a counting semaphore is created
++ * using xSemaphoreCreateCountingStatic() then the application writer can
++ * instead optionally provide the memory that will get used by the counting
++ * semaphore.  xSemaphoreCreateCountingStatic() therefore allows a counting
++ * semaphore to be created without using any dynamic memory allocation.
++ *
++ * Counting semaphores are typically used for two things:
++ *
++ * 1) Counting events.
++ *
++ *    In this usage scenario an event handler will 'give' a semaphore each time
++ *    an event occurs (incrementing the semaphore count value), and a handler
++ *    task will 'take' a semaphore each time it processes an event
++ *    (decrementing the semaphore count value).  The count value is therefore
++ *    the difference between the number of events that have occurred and the
++ *    number that have been processed.  In this case it is desirable for the
++ *    initial count value to be zero.
++ *
++ * 2) Resource management.
++ *
++ *    In this usage scenario the count value indicates the number of resources
++ *    available.  To obtain control of a resource a task must first obtain a
++ *    semaphore - decrementing the semaphore count value.  When the count value
++ *    reaches zero there are no free resources.  When a task finishes with the
++ *    resource it 'gives' the semaphore back - incrementing the semaphore count
++ *    value.  In this case it is desirable for the initial count value to be
++ *    equal to the maximum count value, indicating that all resources are free.
++ *
++ * @param uxMaxCount The maximum count value that can be reached.  When the
++ *        semaphore reaches this value it can no longer be 'given'.
++ *
++ * @param uxInitialCount The count value assigned to the semaphore when it is
++ *        created.
++ *
++ * @return Handle to the created semaphore.  Null if the semaphore could not be
++ *         created.
++ *
++ * Example usage:
++ * @code{c}
++ * SemaphoreHandle_t xSemaphore;
++ *
++ * void vATask( void * pvParameters )
++ * {
++ * SemaphoreHandle_t xSemaphore = NULL;
++ *
++ *  // Semaphore cannot be used before a call to xSemaphoreCreateCounting().
++ *  // The max value to which the semaphore can count should be 10, and the
++ *  // initial value assigned to the count should be 0.
++ *  xSemaphore = xSemaphoreCreateCounting( 10, 0 );
++ *
++ *  if( xSemaphore != NULL )
++ *  {
++ *      // The semaphore was created successfully.
++ *      // The semaphore can now be used.
++ *  }
++ * }
++ * @endcode
++ * \defgroup xSemaphoreCreateCounting xSemaphoreCreateCounting
++ * \ingroup Semaphores
++ */
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++    #define xSemaphoreCreateCounting( uxMaxCount, uxInitialCount )    xQueueCreateCountingSemaphore( ( uxMaxCount ), ( uxInitialCount ) )
++#endif
++
++/**
++ * semphr. h
++ * @code{c}
++ * SemaphoreHandle_t xSemaphoreCreateCountingStatic( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount, StaticSemaphore_t *pxSemaphoreBuffer );
++ * @endcode
++ *
++ * Creates a new counting semaphore instance, and returns a handle by which the
++ * new counting semaphore can be referenced.
++ *
++ * In many usage scenarios it is faster and more memory efficient to use a
++ * direct to task notification in place of a counting semaphore!
++ * https://www.FreeRTOS.org/RTOS-task-notifications.html
++ *
++ * Internally, within the FreeRTOS implementation, counting semaphores use a
++ * block of memory, in which the counting semaphore structure is stored.  If a
++ * counting semaphore is created using xSemaphoreCreateCounting() then the
++ * required memory is automatically dynamically allocated inside the
++ * xSemaphoreCreateCounting() function.  (see
++ * https://www.FreeRTOS.org/a00111.html).  If a counting semaphore is created
++ * using xSemaphoreCreateCountingStatic() then the application writer must
++ * provide the memory.  xSemaphoreCreateCountingStatic() therefore allows a
++ * counting semaphore to be created without using any dynamic memory allocation.
++ *
++ * Counting semaphores are typically used for two things:
++ *
++ * 1) Counting events.
++ *
++ *    In this usage scenario an event handler will 'give' a semaphore each time
++ *    an event occurs (incrementing the semaphore count value), and a handler
++ *    task will 'take' a semaphore each time it processes an event
++ *    (decrementing the semaphore count value).  The count value is therefore
++ *    the difference between the number of events that have occurred and the
++ *    number that have been processed.  In this case it is desirable for the
++ *    initial count value to be zero.
++ *
++ * 2) Resource management.
++ *
++ *    In this usage scenario the count value indicates the number of resources
++ *    available.  To obtain control of a resource a task must first obtain a
++ *    semaphore - decrementing the semaphore count value.  When the count value
++ *    reaches zero there are no free resources.  When a task finishes with the
++ *    resource it 'gives' the semaphore back - incrementing the semaphore count
++ *    value.  In this case it is desirable for the initial count value to be
++ *    equal to the maximum count value, indicating that all resources are free.
++ *
++ * @param uxMaxCount The maximum count value that can be reached.  When the
++ *        semaphore reaches this value it can no longer be 'given'.
++ *
++ * @param uxInitialCount The count value assigned to the semaphore when it is
++ *        created.
++ *
++ * @param pxSemaphoreBuffer Must point to a variable of type StaticSemaphore_t,
++ * which will then be used to hold the semaphore's data structure, removing the
++ * need for the memory to be allocated dynamically.
++ *
++ * @return If the counting semaphore was successfully created then a handle to
++ * the created counting semaphore is returned.  If pxSemaphoreBuffer was NULL
++ * then NULL is returned.
++ *
++ * Example usage:
++ * @code{c}
++ * SemaphoreHandle_t xSemaphore;
++ * StaticSemaphore_t xSemaphoreBuffer;
++ *
++ * void vATask( void * pvParameters )
++ * {
++ * SemaphoreHandle_t xSemaphore = NULL;
++ *
++ *  // Counting semaphore cannot be used before they have been created.  Create
++ *  // a counting semaphore using xSemaphoreCreateCountingStatic().  The max
++ *  // value to which the semaphore can count is 10, and the initial value
++ *  // assigned to the count will be 0.  The address of xSemaphoreBuffer is
++ *  // passed in and will be used to hold the semaphore structure, so no dynamic
++ *  // memory allocation will be used.
++ *  xSemaphore = xSemaphoreCreateCountingStatic( 10, 0, &xSemaphoreBuffer );
++ *
++ *  // No memory allocation was attempted, so xSemaphore cannot be NULL and there
++ *  // is no need to check its value.
++ * }
++ * @endcode
++ * \defgroup xSemaphoreCreateCountingStatic xSemaphoreCreateCountingStatic
++ * \ingroup Semaphores
++ */
++#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++    #define xSemaphoreCreateCountingStatic( uxMaxCount, uxInitialCount, pxSemaphoreBuffer )    xQueueCreateCountingSemaphoreStatic( ( uxMaxCount ), ( uxInitialCount ), ( StaticQueue_t * ) ( pxSemaphoreBuffer ) )
++#endif /* configSUPPORT_STATIC_ALLOCATION */
++
++/**
++ * semphr. h
++ * @code{c}
++ * void vSemaphoreDelete( SemaphoreHandle_t xSemaphore );
++ * @endcode
++ *
++ * Delete a semaphore.  This function must be used with care.  For example,
++ * do not delete a mutex type semaphore if the mutex is held by a task.
++ *
++ * @param xSemaphore A handle to the semaphore to be deleted.
++ *
++ * \defgroup vSemaphoreDelete vSemaphoreDelete
++ * \ingroup Semaphores
++ */
++#define vSemaphoreDelete( xSemaphore )                   vQueueDelete( ( QueueHandle_t ) ( xSemaphore ) )
++
++/**
++ * semphr.h
++ * @code{c}
++ * TaskHandle_t xSemaphoreGetMutexHolder( SemaphoreHandle_t xMutex );
++ * @endcode
++ *
++ * If xMutex is indeed a mutex type semaphore, return the current mutex holder.
++ * If xMutex is not a mutex type semaphore, or the mutex is available (not held
++ * by a task), return NULL.
++ *
++ * Note: This is a good way of determining if the calling task is the mutex
++ * holder, but not a good way of determining the identity of the mutex holder as
++ * the holder may change between the function exiting and the returned value
++ * being tested.
++ */
++#define xSemaphoreGetMutexHolder( xSemaphore )           xQueueGetMutexHolder( ( xSemaphore ) )
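++
++/*
++ * Illustrative sketch (not part of the original FreeRTOS header): using
++ * xSemaphoreGetMutexHolder() to release a mutex only if the calling task is
++ * the current holder.  vReleaseIfHeld() is hypothetical; xMutex is assumed to
++ * have been created with xSemaphoreCreateMutex().
++ * @code{c}
++ * void vReleaseIfHeld( SemaphoreHandle_t xMutex )
++ * {
++ *     // Compare the holder against the handle of the calling task.
++ *     if( xSemaphoreGetMutexHolder( xMutex ) == xTaskGetCurrentTaskHandle() )
++ *     {
++ *         xSemaphoreGive( xMutex );
++ *     }
++ * }
++ * @endcode
++ */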
++
++/**
++ * semphr.h
++ * @code{c}
++ * TaskHandle_t xSemaphoreGetMutexHolderFromISR( SemaphoreHandle_t xMutex );
++ * @endcode
++ *
++ * If xMutex is indeed a mutex type semaphore, return the current mutex holder.
++ * If xMutex is not a mutex type semaphore, or the mutex is available (not held
++ * by a task), return NULL.
++ *
++ */
++#define xSemaphoreGetMutexHolderFromISR( xSemaphore )    xQueueGetMutexHolderFromISR( ( xSemaphore ) )
++
++/**
++ * semphr.h
++ * @code{c}
++ * UBaseType_t uxSemaphoreGetCount( SemaphoreHandle_t xSemaphore );
++ * @endcode
++ *
++ * If the semaphore is a counting semaphore then uxSemaphoreGetCount() returns
++ * its current count value.  If the semaphore is a binary semaphore then
++ * uxSemaphoreGetCount() returns 1 if the semaphore is available, and 0 if the
++ * semaphore is not available.
++ *
++ */
++#define uxSemaphoreGetCount( xSemaphore )                uxQueueMessagesWaiting( ( QueueHandle_t ) ( xSemaphore ) )
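++
++/*
++ * Illustrative sketch (not part of the original FreeRTOS header): using
++ * uxSemaphoreGetCount() to inspect the backlog on a counting semaphore used
++ * for event counting.  xEventSemaphore and the threshold of 8 are hypothetical.
++ * @code{c}
++ * void vCheckBacklog( SemaphoreHandle_t xEventSemaphore )
++ * {
++ *     // The count is the number of 'gives' not yet matched by a 'take'.
++ *     UBaseType_t uxPending = uxSemaphoreGetCount( xEventSemaphore );
++ *
++ *     if( uxPending > 8 )
++ *     {
++ *         // More than 8 events are queued - the consumer is falling behind.
++ *     }
++ * }
++ * @endcode
++ */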
++
++/**
++ * semphr.h
++ * @code{c}
++ * UBaseType_t uxSemaphoreGetCountFromISR( SemaphoreHandle_t xSemaphore );
++ * @endcode
++ *
++ * If the semaphore is a counting semaphore then uxSemaphoreGetCountFromISR() returns
++ * its current count value.  If the semaphore is a binary semaphore then
++ * uxSemaphoreGetCountFromISR() returns 1 if the semaphore is available, and 0 if the
++ * semaphore is not available.
++ *
++ */
++#define uxSemaphoreGetCountFromISR( xSemaphore )         uxQueueMessagesWaitingFromISR( ( QueueHandle_t ) ( xSemaphore ) )
++
++#endif /* SEMAPHORE_H */
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/task.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/task.h
+new file mode 100644
+index 0000000000..20f6a52104
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/task.h
+@@ -0,0 +1,2265 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++
++#ifndef INC_TASK_H
++#define INC_TASK_H
++
++#ifndef INC_FREERTOS_H
++    #error "include FreeRTOS.h must appear in source files before include task.h"
++#endif
++
++/* *INDENT-OFF* */
++#ifdef __cplusplus
++    extern "C" {
++#endif
++/* *INDENT-ON* */
++
++/*-----------------------------------------------------------
++* MACROS AND DEFINITIONS
++*----------------------------------------------------------*/
++
++/*
++ * If tskKERNEL_VERSION_NUMBER ends with + it represents the version in development
++ * after the numbered release.
++ *
++ * The tskKERNEL_VERSION_MAJOR, tskKERNEL_VERSION_MINOR, tskKERNEL_VERSION_BUILD
++ * values will reflect the last released version number.
++ */
++#define tskKERNEL_VERSION_NUMBER       "V10.4.6"
++#define tskKERNEL_VERSION_MAJOR        10
++#define tskKERNEL_VERSION_MINOR        4
++#define tskKERNEL_VERSION_BUILD        6
++
++/* The direct to task notification feature used to have only a single notification
++ * per task.  Now there is an array of notifications per task that is dimensioned by
++ * configTASK_NOTIFICATION_ARRAY_ENTRIES.  For backward compatibility, any use of the
++ * original direct to task notification defaults to using the first index in the
++ * array. */
++#define tskDEFAULT_INDEX_TO_NOTIFY     ( 0 )
++
++/* ESP32 */
++#define tskNO_AFFINITY  ( 0x7FFFFFFF )
++
++/**
++ * task. h
++ *
++ * Type by which tasks are referenced.  For example, a call to xTaskCreate
++ * returns (via a pointer parameter) an TaskHandle_t variable that can then
++ * be used as a parameter to vTaskDelete to delete the task.
++ *
++ * \defgroup TaskHandle_t TaskHandle_t
++ * \ingroup Tasks
++ */
++struct tskTaskControlBlock; /* The old naming convention is used to prevent breaking kernel aware debuggers. */
++typedef struct tskTaskControlBlock * TaskHandle_t;
++
++/*
++ * Defines the prototype to which the application task hook function must
++ * conform.
++ */
++typedef BaseType_t (* TaskHookFunction_t)( void * );
++
++/* Task states returned by eTaskGetState. */
++typedef enum
++{
++    eRunning = 0, /* A task is querying the state of itself, so must be running. */
++    eReady,       /* The task being queried is in a ready or pending ready list. */
++    eBlocked,     /* The task being queried is in the Blocked state. */
++    eSuspended,   /* The task being queried is in the Suspended state, or is in the Blocked state with an infinite time out. */
++    eDeleted,     /* The task being queried has been deleted, but its TCB has not yet been freed. */
++    eInvalid      /* Used as an 'invalid state' value. */
++} eTaskState;
++
++/* Actions that can be performed when vTaskNotify() is called. */
++typedef enum
++{
++    eNoAction = 0,            /* Notify the task without updating its notify value. */
++    eSetBits,                 /* Set bits in the task's notification value. */
++    eIncrement,               /* Increment the task's notification value. */
++    eSetValueWithOverwrite,   /* Set the task's notification value to a specific value even if the previous value has not yet been read by the task. */
++    eSetValueWithoutOverwrite /* Set the task's notification value if the previous value has been read by the task. */
++} eNotifyAction;
++
++/*
++ * Used internally only.
++ */
++typedef struct xTIME_OUT
++{
++    BaseType_t xOverflowCount;
++    TickType_t xTimeOnEntering;
++} TimeOut_t;
++
++/**
++ * Defines the priority used by the idle task.  This must not be modified.
++ *
++ * \ingroup TaskUtils
++ */
++#define tskIDLE_PRIORITY    ( ( UBaseType_t ) 0U )
++
++/**
++ * task. h
++ *
++ * Macro for forcing a context switch.
++ *
++ * \defgroup taskYIELD taskYIELD
++ * \ingroup SchedulerControl
++ */
++#define taskYIELD()                        portYIELD()
++
++/**
++ * task. h
++ *
++ * Macro to mark the start of a critical code region.  Preemptive context
++ * switches cannot occur when in a critical region.
++ *
++ * NOTE: This may alter the stack (depending on the portable implementation)
++ * so must be used with care!
++ *
++ * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL
++ * \ingroup SchedulerControl
++ */
++#define taskENTER_CRITICAL()               portENTER_CRITICAL()
++#define taskENTER_CRITICAL_FROM_ISR()      portSET_INTERRUPT_MASK_FROM_ISR()
++
++/**
++ * task. h
++ *
++ * Macro to mark the end of a critical code region.  Preemptive context
++ * switches cannot occur when in a critical region.
++ *
++ * NOTE: This may alter the stack (depending on the portable implementation)
++ * so must be used with care!
++ *
++ * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL
++ * \ingroup SchedulerControl
++ */
++#define taskEXIT_CRITICAL()                portEXIT_CRITICAL()
++#define taskEXIT_CRITICAL_FROM_ISR( x )    portCLEAR_INTERRUPT_MASK_FROM_ISR( x )
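++
++/*
++ * Illustrative sketch (not part of the original FreeRTOS header): a short
++ * critical region protecting a counter shared with other tasks.  The variable
++ * ulSharedCounter and the function vIncrementSharedCounter() are hypothetical.
++ * @code{c}
++ * volatile uint32_t ulSharedCounter = 0;
++ *
++ * void vIncrementSharedCounter( void )
++ * {
++ *     // Keep the region as short as possible - no context switch can occur
++ *     // until taskEXIT_CRITICAL() is called.
++ *     taskENTER_CRITICAL();
++ *     ulSharedCounter++;
++ *     taskEXIT_CRITICAL();
++ * }
++ * @endcode
++ */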
++
++/**
++ * task. h
++ *
++ * Macro to disable all maskable interrupts.
++ *
++ * \defgroup taskDISABLE_INTERRUPTS taskDISABLE_INTERRUPTS
++ * \ingroup SchedulerControl
++ */
++#define taskDISABLE_INTERRUPTS()           portDISABLE_INTERRUPTS()
++
++/**
++ * task. h
++ *
++ * Macro to enable microcontroller interrupts.
++ *
++ * \defgroup taskENABLE_INTERRUPTS taskENABLE_INTERRUPTS
++ * \ingroup SchedulerControl
++ */
++#define taskENABLE_INTERRUPTS()            portENABLE_INTERRUPTS()
++
++/* Definitions returned by xTaskGetSchedulerState().  taskSCHEDULER_SUSPENDED is
++ * 0 to generate more optimal code when configASSERT() is defined as the constant
++ * is used in assert() statements. */
++#define taskSCHEDULER_SUSPENDED      ( ( BaseType_t ) 0 )
++#define taskSCHEDULER_NOT_STARTED    ( ( BaseType_t ) 1 )
++#define taskSCHEDULER_RUNNING        ( ( BaseType_t ) 2 )
++
++/*-----------------------------------------------------------
++* TASK CREATION API
++*----------------------------------------------------------*/
++
++/**
++ * task. h
++ * @code{c}
++ * BaseType_t xTaskCreate(
++ *                            TaskFunction_t pxTaskCode,
++ *                            const char *pcName,
++ *                            configSTACK_DEPTH_TYPE usStackDepth,
++ *                            void *pvParameters,
++ *                            UBaseType_t uxPriority,
++ *                            TaskHandle_t *pxCreatedTask
++ *                        );
++ * @endcode
++ *
++ * Create a new task and add it to the list of tasks that are ready to run.
++ *
++ * Internally, within the FreeRTOS implementation, tasks use two blocks of
++ * memory.  The first block is used to hold the task's data structures.  The
++ * second block is used by the task as its stack.  If a task is created using
++ * xTaskCreate() then both blocks of memory are automatically dynamically
++ * allocated inside the xTaskCreate() function.  (see
++ * https://www.FreeRTOS.org/a00111.html).  If a task is created using
++ * xTaskCreateStatic() then the application writer must provide the required
++ * memory.  xTaskCreateStatic() therefore allows a task to be created without
++ * using any dynamic memory allocation.
++ *
++ * See xTaskCreateStatic() for a version that does not use any dynamic memory
++ * allocation.
++ *
++ * xTaskCreate() can only be used to create a task that has unrestricted
++ * access to the entire microcontroller memory map.  Systems that include MPU
++ * support can alternatively create an MPU constrained task using
++ * xTaskCreateRestricted().
++ *
++ * @param pxTaskCode Pointer to the task entry function.  Tasks
++ * must be implemented to never return (i.e. continuous loop).
++ *
++ * @param pcName A descriptive name for the task.  This is mainly used to
++ * facilitate debugging.  Max length defined by configMAX_TASK_NAME_LEN - default
++ * is 16.
++ *
++ * @param usStackDepth The size of the task stack specified as the number of
++ * variables the stack can hold - not the number of bytes.  For example, if
++ * the stack is 16 bits wide and usStackDepth is defined as 100, 200 bytes
++ * will be allocated for stack storage.
++ *
++ * @param pvParameters Pointer that will be used as the parameter for the task
++ * being created.
++ *
++ * @param uxPriority The priority at which the task should run.  Systems that
++ * include MPU support can optionally create tasks in a privileged (system)
++ * mode by setting bit portPRIVILEGE_BIT of the priority parameter.  For
++ * example, to create a privileged task at priority 2 the uxPriority parameter
++ * should be set to ( 2 | portPRIVILEGE_BIT ).
++ *
++ * @param pxCreatedTask Used to pass back a handle by which the created task
++ * can be referenced.
++ *
++ * @return pdPASS if the task was successfully created and added to a ready
++ * list, otherwise an error code defined in the file projdefs.h
++ *
++ * Example usage:
++ * @code{c}
++ * // Task to be created.
++ * void vTaskCode( void * pvParameters )
++ * {
++ *   for( ;; )
++ *   {
++ *       // Task code goes here.
++ *   }
++ * }
++ *
++ * // Function that creates a task.
++ * void vOtherFunction( void )
++ * {
++ * static uint8_t ucParameterToPass;
++ * TaskHandle_t xHandle = NULL;
++ *
++ *   // Create the task, storing the handle.  Note that the passed parameter ucParameterToPass
++ *   // must exist for the lifetime of the task, so in this case is declared static.  If it was just
++ *   // an automatic stack variable it might no longer exist, or at least have been corrupted, by the time
++ *   // the new task attempts to access it.
++ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, &ucParameterToPass, tskIDLE_PRIORITY, &xHandle );
++ *   configASSERT( xHandle );
++ *
++ *   // Use the handle to delete the task.
++ *   if( xHandle != NULL )
++ *   {
++ *      vTaskDelete( xHandle );
++ *   }
++ * }
++ * @endcode
++ * \defgroup xTaskCreate xTaskCreate
++ * \ingroup Tasks
++ */
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++    BaseType_t xTaskCreate( TaskFunction_t pxTaskCode,
++                            const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
++                            const configSTACK_DEPTH_TYPE usStackDepth,
++                            void * const pvParameters,
++                            UBaseType_t uxPriority,
++                            TaskHandle_t * const pxCreatedTask );
++#endif
++/* ESP32 */
++#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++    BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pvTaskCode,
++                                        const char * const pcName,
++                                        const uint32_t usStackDepth,
++                                        void * const pvParameters,
++                                        UBaseType_t uxPriority,
++                                        TaskHandle_t * const pvCreatedTask,
++                                        const BaseType_t xCoreID);
++
++#endif
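++
++/*
++ * Illustrative sketch (not part of the original FreeRTOS header): creating a
++ * task with the ESP32-style xTaskCreatePinnedToCore().  vBlinkTask, "blink"
++ * and BLINK_STACK_SIZE are hypothetical; on the single-core ESP32-C3 the
++ * xCoreID argument is typically 0 or tskNO_AFFINITY.
++ * @code{c}
++ * #define BLINK_STACK_SIZE 2048
++ *
++ * void vBlinkTask( void * pvParameters )
++ * {
++ *     for( ;; )
++ *     {
++ *         // Task code goes here.
++ *     }
++ * }
++ *
++ * void vStartBlink( void )
++ * {
++ *     TaskHandle_t xHandle = NULL;
++ *
++ *     xTaskCreatePinnedToCore( vBlinkTask, "blink", BLINK_STACK_SIZE, NULL,
++ *                              tskIDLE_PRIORITY + 1, &xHandle, tskNO_AFFINITY );
++ *     configASSERT( xHandle );
++ * }
++ * @endcode
++ */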
++
++/**
++ * task. h
++ * @code{c}
++ * TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
++ *                               const char *pcName,
++ *                               uint32_t ulStackDepth,
++ *                               void *pvParameters,
++ *                               UBaseType_t uxPriority,
++ *                               StackType_t *puxStackBuffer,
++ *                               StaticTask_t *pxTaskBuffer );
++ * @endcode
++ *
++ * Create a new task and add it to the list of tasks that are ready to run.
++ *
++ * Internally, within the FreeRTOS implementation, tasks use two blocks of
++ * memory.  The first block is used to hold the task's data structures.  The
++ * second block is used by the task as its stack.  If a task is created using
++ * xTaskCreate() then both blocks of memory are automatically dynamically
++ * allocated inside the xTaskCreate() function.  (see
++ * https://www.FreeRTOS.org/a00111.html).  If a task is created using
++ * xTaskCreateStatic() then the application writer must provide the required
++ * memory.  xTaskCreateStatic() therefore allows a task to be created without
++ * using any dynamic memory allocation.
++ *
++ * @param pxTaskCode Pointer to the task entry function.  Tasks
++ * must be implemented to never return (i.e. continuous loop).
++ *
++ * @param pcName A descriptive name for the task.  This is mainly used to
++ * facilitate debugging.  The maximum length of the string is defined by
++ * configMAX_TASK_NAME_LEN in FreeRTOSConfig.h.
++ *
++ * @param ulStackDepth The size of the task stack specified as the number of
++ * variables the stack can hold - not the number of bytes.  For example, if
++ * the stack is 32-bits wide and ulStackDepth is defined as 100 then 400 bytes
++ * will be allocated for stack storage.
++ *
++ * @param pvParameters Pointer that will be used as the parameter for the task
++ * being created.
++ *
++ * @param uxPriority The priority at which the task will run.
++ *
++ * @param puxStackBuffer Must point to a StackType_t array that has at least
++ * ulStackDepth indexes - the array will then be used as the task's stack,
++ * removing the need for the stack to be allocated dynamically.
++ *
++ * @param pxTaskBuffer Must point to a variable of type StaticTask_t, which will
++ * then be used to hold the task's data structures, removing the need for the
++ * memory to be allocated dynamically.
++ *
++ * @return If neither puxStackBuffer nor pxTaskBuffer are NULL, then the task
++ * will be created and a handle to the created task is returned.  If either
++ * puxStackBuffer or pxTaskBuffer are NULL then the task will not be created and
++ * NULL is returned.
++ *
++ * Example usage:
++ * @code{c}
++ *
++ *  // Dimensions of the buffer that the task being created will use as its stack.
++ *  // NOTE:  This is the number of words the stack will hold, not the number of
++ *  // bytes.  For example, if each stack item is 32-bits, and this is set to 100,
++ *  // then 400 bytes (100 * 32-bits) will be allocated.
++ *  #define STACK_SIZE 200
++ *
++ *  // Structure that will hold the TCB of the task being created.
++ *  StaticTask_t xTaskBuffer;
++ *
++ *  // Buffer that the task being created will use as its stack.  Note this is
++ *  // an array of StackType_t variables.  The size of StackType_t is dependent on
++ *  // the RTOS port.
++ *  StackType_t xStack[ STACK_SIZE ];
++ *
++ *  // Function that implements the task being created.
++ *  void vTaskCode( void * pvParameters )
++ *  {
++ *      // The parameter value is expected to be 1 as 1 is passed in the
++ *      // pvParameters value in the call to xTaskCreateStatic().
++ *      configASSERT( ( uint32_t ) pvParameters == 1UL );
++ *
++ *      for( ;; )
++ *      {
++ *          // Task code goes here.
++ *      }
++ *  }
++ *
++ *  // Function that creates a task.
++ *  void vOtherFunction( void )
++ *  {
++ *      TaskHandle_t xHandle = NULL;
++ *
++ *      // Create the task without using any dynamic memory allocation.
++ *      xHandle = xTaskCreateStatic(
++ *                    vTaskCode,       // Function that implements the task.
++ *                    "NAME",          // Text name for the task.
++ *                    STACK_SIZE,      // Stack size in words, not bytes.
++ *                    ( void * ) 1,    // Parameter passed into the task.
++ *                    tskIDLE_PRIORITY,// Priority at which the task is created.
++ *                    xStack,          // Array to use as the task's stack.
++ *                    &xTaskBuffer );  // Variable to hold the task's data structure.
++ *
++ *      // puxStackBuffer and pxTaskBuffer were not NULL, so the task will have
++ *      // been created, and xHandle will be the task's handle.  Use the handle
++ *      // to suspend the task.
++ *      vTaskSuspend( xHandle );
++ *  }
++ * @endcode
++ * \defgroup xTaskCreateStatic xTaskCreateStatic
++ * \ingroup Tasks
++ */
++#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++    TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
++                                    const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
++                                    const uint32_t ulStackDepth,
++                                    void * const pvParameters,
++                                    UBaseType_t uxPriority,
++                                    StackType_t * const puxStackBuffer,
++                                    StaticTask_t * const pxTaskBuffer );
++#endif /* configSUPPORT_STATIC_ALLOCATION */
++
++/**
++ * task. h
++ * @code{c}
++ * void vTaskDelete( TaskHandle_t xTaskToDelete );
++ * @endcode
++ *
++ * INCLUDE_vTaskDelete must be defined as 1 for this function to be available.
++ * See the configuration section for more information.
++ *
++ * Remove a task from the RTOS real time kernel's management.  The task being
++ * deleted will be removed from all ready, blocked, suspended and event lists.
++ *
++ * NOTE:  The idle task is responsible for freeing the kernel allocated
++ * memory from tasks that have been deleted.  It is therefore important that
++ * the idle task is not starved of microcontroller processing time if your
++ * application makes any calls to vTaskDelete ().  Memory allocated by the
++ * task code is not automatically freed, and should be freed before the task
++ * is deleted.
++ *
++ * See the demo application file death.c for sample code that utilises
++ * vTaskDelete ().
++ *
++ * @param xTaskToDelete The handle of the task to be deleted.  Passing NULL will
++ * cause the calling task to be deleted.
++ *
++ * Example usage:
++ * @code{c}
++ * void vOtherFunction( void )
++ * {
++ * TaskHandle_t xHandle;
++ *
++ *   // Create the task, storing the handle.
++ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
++ *
++ *   // Use the handle to delete the task.
++ *   vTaskDelete( xHandle );
++ * }
++ * @endcode
++ * \defgroup vTaskDelete vTaskDelete
++ * \ingroup Tasks
++ */
++void vTaskDelete( TaskHandle_t xTaskToDelete );
++
++/*-----------------------------------------------------------
++* TASK CONTROL API
++*----------------------------------------------------------*/
++
++/**
++ * task. h
++ * @code{c}
++ * void vTaskDelay( const TickType_t xTicksToDelay );
++ * @endcode
++ *
++ * Delay a task for a given number of ticks.  The actual time that the
++ * task remains blocked depends on the tick rate.  The constant
++ * portTICK_PERIOD_MS can be used to calculate real time from the tick
++ * rate - with the resolution of one tick period.
++ *
++ * INCLUDE_vTaskDelay must be defined as 1 for this function to be available.
++ * See the configuration section for more information.
++ *
++ *
++ * vTaskDelay() specifies a time at which the task wishes to unblock relative to
++ * the time at which vTaskDelay() is called.  For example, specifying a block
++ * period of 100 ticks will cause the task to unblock 100 ticks after
++ * vTaskDelay() is called.  vTaskDelay() does not therefore provide a good method
++ * of controlling the frequency of a periodic task as the path taken through the
++ * code, as well as other task and interrupt activity, will affect the frequency
++ * at which vTaskDelay() gets called and therefore the time at which the task
++ * next executes.  See xTaskDelayUntil() for an alternative API function designed
++ * to facilitate fixed frequency execution.  It does this by specifying an
++ * absolute time (rather than a relative time) at which the calling task should
++ * unblock.
++ *
++ * @param xTicksToDelay The amount of time, in tick periods, that
++ * the calling task should block.
++ *
++ * Example usage:
++ *
++ * void vTaskFunction( void * pvParameters )
++ * {
++ * // Block for 500ms.
++ * const TickType_t xDelay = 500 / portTICK_PERIOD_MS;
++ *
++ *   for( ;; )
++ *   {
++ *       // Simply toggle the LED every 500ms, blocking between each toggle.
++ *       vToggleLED();
++ *       vTaskDelay( xDelay );
++ *   }
++ * }
++ *
++ * \defgroup vTaskDelay vTaskDelay
++ * \ingroup TaskCtrl
++ */
++void vTaskDelay( const TickType_t xTicksToDelay );
++
++/**
++ * task. h
++ * @code{c}
++ * BaseType_t xTaskDelayUntil( TickType_t *pxPreviousWakeTime, const TickType_t xTimeIncrement );
++ * @endcode
++ *
++ * INCLUDE_xTaskDelayUntil must be defined as 1 for this function to be available.
++ * See the configuration section for more information.
++ *
++ * Delay a task until a specified time.  This function can be used by periodic
++ * tasks to ensure a constant execution frequency.
++ *
++ * This function differs from vTaskDelay () in one important aspect:  vTaskDelay () will
++ * cause a task to block for the specified number of ticks from the time vTaskDelay () is
++ * called.  It is therefore difficult to use vTaskDelay () by itself to generate a fixed
++ * execution frequency as the time between a task starting to execute and that task
++ * calling vTaskDelay () may not be fixed [the task may take a different path through the
++ * code between calls, or may get interrupted or preempted a different number of times
++ * each time it executes].
++ *
++ * Whereas vTaskDelay () specifies a wake time relative to the time at which the function
++ * is called, xTaskDelayUntil () specifies the absolute (exact) time at which it wishes to
++ * unblock.
++ *
++ * The macro pdMS_TO_TICKS() can be used to calculate the number of ticks from a
++ * time specified in milliseconds with a resolution of one tick period.
++ *
++ * @param pxPreviousWakeTime Pointer to a variable that holds the time at which the
++ * task was last unblocked.  The variable must be initialised with the current time
++ * prior to its first use (see the example below).  Following this the variable is
++ * automatically updated within xTaskDelayUntil ().
++ *
++ * @param xTimeIncrement The cycle time period.  The task will be unblocked at
++ * time *pxPreviousWakeTime + xTimeIncrement.  Calling xTaskDelayUntil with the
++ * same xTimeIncrement parameter value will cause the task to execute with
++ * a fixed interval period.
++ *
++ * @return Value which can be used to check whether the task was actually delayed.
++ * Will be pdTRUE if the task was delayed and pdFALSE otherwise.  A task will not
++ * be delayed if the next expected wake time is in the past.
++ *
++ * Example usage:
++ * @code{c}
++ * // Perform an action every 10 ticks.
++ * void vTaskFunction( void * pvParameters )
++ * {
++ * TickType_t xLastWakeTime;
++ * const TickType_t xFrequency = 10;
++ * BaseType_t xWasDelayed;
++ *
++ *     // Initialise the xLastWakeTime variable with the current time.
++ *     xLastWakeTime = xTaskGetTickCount ();
++ *     for( ;; )
++ *     {
++ *         // Wait for the next cycle.
++ *         xWasDelayed = xTaskDelayUntil( &xLastWakeTime, xFrequency );
++ *
++ *         // Perform action here. xWasDelayed value can be used to determine
++ *         // whether a deadline was missed if the code here took too long.
++ *     }
++ * }
++ * @endcode
++ * \defgroup xTaskDelayUntil xTaskDelayUntil
++ * \ingroup TaskCtrl
++ */
++BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
++                            const TickType_t xTimeIncrement );
++
++/*
++ * vTaskDelayUntil() is the older version of xTaskDelayUntil() and does not
++ * return a value.
++ */
++#define vTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement )           \
++    {                                                                   \
++        ( void ) xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement ); \
++    }
++
++/**
++ * task. h
++ * @code{c}
++ * BaseType_t xTaskAbortDelay( TaskHandle_t xTask );
++ * @endcode
++ *
++ * INCLUDE_xTaskAbortDelay must be defined as 1 in FreeRTOSConfig.h for this
++ * function to be available.
++ *
++ * A task will enter the Blocked state when it is waiting for an event.  The
++ * event it is waiting for can be a temporal event (waiting for a time), such
++ * as when vTaskDelay() is called, or an event on an object, such as when
++ * xQueueReceive() or ulTaskNotifyTake() is called.  If the handle of a task
++ * that is in the Blocked state is used in a call to xTaskAbortDelay() then the
++ * task will leave the Blocked state, and return from whichever function call
++ * placed the task into the Blocked state.
++ *
++ * There is no 'FromISR' version of this function as an interrupt would need to
++ * know which object a task was blocked on in order to know which actions to
++ * take.  For example, if the task was blocked on a queue the interrupt handler
++ * would then need to know if the queue was locked.
++ *
++ * @param xTask The handle of the task to remove from the Blocked state.
++ *
++ * @return If the task referenced by xTask was not in the Blocked state then
++ * pdFAIL is returned.  Otherwise pdPASS is returned.
++ *
++ * \defgroup xTaskAbortDelay xTaskAbortDelay
++ * \ingroup TaskCtrl
++ */
++BaseType_t xTaskAbortDelay( TaskHandle_t xTask );
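++
++/*
++ * Illustrative sketch (not part of the original FreeRTOS header): forcing a
++ * worker task out of a long vTaskDelay() when an urgent request arrives.
++ * vOnUrgentRequest() and xWorkerHandle are hypothetical.
++ * @code{c}
++ * void vOnUrgentRequest( TaskHandle_t xWorkerHandle )
++ * {
++ *     // If the worker is currently Blocked (for example inside vTaskDelay())
++ *     // it is unblocked immediately; otherwise pdFAIL is returned and nothing
++ *     // happens.
++ *     ( void ) xTaskAbortDelay( xWorkerHandle );
++ * }
++ * @endcode
++ */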
++
++/**
++ * task. h
++ * @code{c}
++ * UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask );
++ * @endcode
++ *
++ * INCLUDE_uxTaskPriorityGet must be defined as 1 for this function to be available.
++ * See the configuration section for more information.
++ *
++ * Obtain the priority of any task.
++ *
++ * @param xTask Handle of the task to be queried.  Passing a NULL
++ * handle results in the priority of the calling task being returned.
++ *
++ * @return The priority of xTask.
++ *
++ * Example usage:
++ * @code{c}
++ * void vAFunction( void )
++ * {
++ * TaskHandle_t xHandle;
++ *
++ *   // Create a task, storing the handle.
++ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
++ *
++ *   // ...
++ *
++ *   // Use the handle to obtain the priority of the created task.
++ *   // It was created with tskIDLE_PRIORITY, but may have changed
++ *   // it itself.
++ *   if( uxTaskPriorityGet( xHandle ) != tskIDLE_PRIORITY )
++ *   {
++ *       // The task has changed its priority.
++ *   }
++ *
++ *   // ...
++ *
++ *   // Is our priority higher than the created task?
++ *   if( uxTaskPriorityGet( xHandle ) < uxTaskPriorityGet( NULL ) )
++ *   {
++ *       // Our priority (obtained using NULL handle) is higher.
++ *   }
++ * }
++ * @endcode
++ * \defgroup uxTaskPriorityGet uxTaskPriorityGet
++ * \ingroup TaskCtrl
++ */
++UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask );
++
++/**
++ * task. h
++ * @code{c}
++ * UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask );
++ * @endcode
++ *
++ * A version of uxTaskPriorityGet() that can be used from an ISR.
++ */
++UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask );
++
++/**
++ * task. h
++ * @code{c}
++ * eTaskState eTaskGetState( TaskHandle_t xTask );
++ * @endcode
++ *
++ * INCLUDE_eTaskGetState must be defined as 1 for this function to be available.
++ * See the configuration section for more information.
++ *
++ * Obtain the state of any task.  States are encoded by the eTaskState
++ * enumerated type.
++ *
++ * @param xTask Handle of the task to be queried.
++ *
++ * @return The state of xTask at the time the function was called.  Note the
++ * state of the task might change between the function being called, and the
++ * function's return value being tested by the calling task.
++ */
++eTaskState eTaskGetState( TaskHandle_t xTask );
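++
++/*
++ * Illustrative sketch (not part of the original FreeRTOS header): resuming a
++ * task only if eTaskGetState() reports it as suspended.  vResumeIfSuspended()
++ * is hypothetical.
++ * @code{c}
++ * void vResumeIfSuspended( TaskHandle_t xHandle )
++ * {
++ *     if( eTaskGetState( xHandle ) == eSuspended )
++ *     {
++ *         vTaskResume( xHandle );
++ *     }
++ * }
++ * @endcode
++ */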
++
++/**
++ * task. h
++ * @code{c}
++ * void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority );
++ * @endcode
++ *
++ * INCLUDE_vTaskPrioritySet must be defined as 1 for this function to be available.
++ * See the configuration section for more information.
++ *
++ * Set the priority of any task.
++ *
++ * A context switch will occur before the function returns if the priority
++ * being set is higher than the currently executing task.
++ *
++ * @param xTask Handle to the task for which the priority is being set.
++ * Passing a NULL handle results in the priority of the calling task being set.
++ *
++ * @param uxNewPriority The priority to which the task will be set.
++ *
++ * Example usage:
++ * @code{c}
++ * void vAFunction( void )
++ * {
++ * TaskHandle_t xHandle;
++ *
++ *   // Create a task, storing the handle.
++ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
++ *
++ *   // ...
++ *
++ *   // Use the handle to raise the priority of the created task.
++ *   vTaskPrioritySet( xHandle, tskIDLE_PRIORITY + 1 );
++ *
++ *   // ...
++ *
++ *   // Use a NULL handle to raise our priority to the same value.
++ *   vTaskPrioritySet( NULL, tskIDLE_PRIORITY + 1 );
++ * }
++ * @endcode
++ * \defgroup vTaskPrioritySet vTaskPrioritySet
++ * \ingroup TaskCtrl
++ */
++void vTaskPrioritySet( TaskHandle_t xTask,
++                       UBaseType_t uxNewPriority );
++
++/**
++ * task. h
++ * @code{c}
++ * void vTaskSuspend( TaskHandle_t xTaskToSuspend );
++ * @endcode
++ *
++ * INCLUDE_vTaskSuspend must be defined as 1 for this function to be available.
++ * See the configuration section for more information.
++ *
++ * Suspend any task.  When suspended a task will never get any microcontroller
++ * processing time, no matter what its priority.
++ *
++ * Calls to vTaskSuspend are not cumulative -
++ * i.e. calling vTaskSuspend () twice on the same task still only requires one
++ * call to vTaskResume () to ready the suspended task.
++ *
++ * RT-Thread only supports suspending the current running thread.
++ * This function must be called with NULL as the parameter.
++ *
++ * @param xTaskToSuspend Handle to the task being suspended.  Passing a NULL
++ * handle will cause the calling task to be suspended.
++ *
++ * Example usage:
++ * @code{c}
++ * void vAFunction( void )
++ * {
++ * TaskHandle_t xHandle;
++ *
++ *   // Create a task, storing the handle.
++ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
++ *
++ *   // ...
++ *
++ *   // Use the handle to suspend the created task.
++ *   vTaskSuspend( xHandle );
++ *
++ *   // ...
++ *
++ *   // The created task will not run during this period, unless
++ *   // another task calls vTaskResume( xHandle ).
++ *
++ *   //...
++ *
++ *
++ *   // Suspend ourselves.
++ *   vTaskSuspend( NULL );
++ *
++ *   // We cannot get here unless another task calls vTaskResume
++ *   // with our handle as the parameter.
++ * }
++ * @endcode
++ * \defgroup vTaskSuspend vTaskSuspend
++ * \ingroup TaskCtrl
++ */
++void vTaskSuspend( TaskHandle_t xTaskToSuspend );
++
++/**
++ * task. h
++ * @code{c}
++ * void vTaskResume( TaskHandle_t xTaskToResume );
++ * @endcode
++ *
++ * INCLUDE_vTaskSuspend must be defined as 1 for this function to be available.
++ * See the configuration section for more information.
++ *
++ * Resumes a suspended task.
++ *
++ * A task that has been suspended by one or more calls to vTaskSuspend ()
++ * will be made available for running again by a single call to
++ * vTaskResume ().
++ *
++ * @param xTaskToResume Handle to the task being readied.
++ *
++ * Example usage:
++ * @code{c}
++ * void vAFunction( void )
++ * {
++ * TaskHandle_t xHandle;
++ *
++ *   // Create a task, storing the handle.
++ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
++ *
++ *   // ...
++ *
++ *   // Use the handle to suspend the created task.
++ *   vTaskSuspend( xHandle );
++ *
++ *   // ...
++ *
++ *   // The created task will not run during this period, unless
++ *   // another task calls vTaskResume( xHandle ).
++ *
++ *   //...
++ *
++ *
++ *   // Resume the suspended task ourselves.
++ *   vTaskResume( xHandle );
++ *
++ *   // The created task will once again get microcontroller processing
++ *   // time in accordance with its priority within the system.
++ * }
++ * @endcode
++ * \defgroup vTaskResume vTaskResume
++ * \ingroup TaskCtrl
++ */
++void vTaskResume( TaskHandle_t xTaskToResume );
++
++/**
++ * task. h
++ * @code{c}
++ * void xTaskResumeFromISR( TaskHandle_t xTaskToResume );
++ * @endcode
++ *
++ * INCLUDE_xTaskResumeFromISR must be defined as 1 for this function to be
++ * available.  See the configuration section for more information.
++ *
++ * An implementation of vTaskResume() that can be called from within an ISR.
++ *
++ * A task that has been suspended by one or more calls to vTaskSuspend ()
++ * will be made available for running again by a single call to
++ * xTaskResumeFromISR ().
++ *
++ * xTaskResumeFromISR() should not be used to synchronise a task with an
++ * interrupt if there is a chance that the interrupt could arrive prior to the
++ * task being suspended - as this can lead to interrupts being missed. Use of a
++ * semaphore as a synchronisation mechanism would avoid this eventuality.
++ *
++ * @param xTaskToResume Handle to the task being readied.
++ *
++ * @return pdTRUE if resuming the task should result in a context switch,
++ * otherwise pdFALSE. This is used by the ISR to determine if a context switch
++ * may be required following the ISR.
++ *
++ * \defgroup vTaskResumeFromISR vTaskResumeFromISR
++ * \ingroup TaskCtrl
++ */
++BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume );
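++
++/*
++ * Illustrative sketch (not part of the original FreeRTOS header): resuming a
++ * previously suspended handler task from an interrupt and requesting a context
++ * switch if one is needed.  xHandlerTask and vAnInterruptHandler() are
++ * hypothetical; portYIELD_FROM_ISR() is the usual way to act on the return
++ * value, although the exact yield macro is port specific.
++ * @code{c}
++ * TaskHandle_t xHandlerTask; // Set when the handler task is created.
++ *
++ * void vAnInterruptHandler( void )
++ * {
++ *     BaseType_t xYieldRequired;
++ *
++ *     // Ready the suspended handler task.
++ *     xYieldRequired = xTaskResumeFromISR( xHandlerTask );
++ *
++ *     // Request a context switch on exit from the ISR if required.
++ *     portYIELD_FROM_ISR( xYieldRequired );
++ * }
++ * @endcode
++ */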
++
++/*-----------------------------------------------------------
++* SCHEDULER CONTROL
++*----------------------------------------------------------*/
++
++/**
++ * task. h
++ * @code{c}
++ * void vTaskStartScheduler( void );
++ * @endcode
++ *
++ * Starts the real time kernel tick processing.  After calling the kernel
++ * has control over which tasks are executed and when.
++ *
++ * See the demo application file main.c for an example of creating
++ * tasks and starting the kernel.
++ *
++ * Example usage:
++ * @code{c}
++ * void vAFunction( void )
++ * {
++ *   // Create at least one task before starting the kernel.
++ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, NULL );
++ *
++ *   // Start the real time kernel with preemption.
++ *   vTaskStartScheduler ();
++ *
++ *   // Will not get here unless a task calls vTaskEndScheduler ()
++ * }
++ * @endcode
++ *
++ * \defgroup vTaskStartScheduler vTaskStartScheduler
++ * \ingroup SchedulerControl
++ */
++void vTaskStartScheduler( void );
++
++/**
++ * task. h
++ * @code{c}
++ * void vTaskEndScheduler( void );
++ * @endcode
++ *
++ * NOTE:  At the time of writing only the x86 real mode port, which runs on a PC
++ * in place of DOS, implements this function.
++ *
++ * Stops the real time kernel tick.  All created tasks will be automatically
++ * deleted and multitasking (either preemptive or cooperative) will
++ * stop.  Execution then resumes from the point where vTaskStartScheduler ()
++ * was called, as if vTaskStartScheduler () had just returned.
++ *
++ * See the demo application file main. c in the demo/PC directory for an
++ * example that uses vTaskEndScheduler ().
++ *
++ * vTaskEndScheduler () requires an exit function to be defined within the
++ * portable layer (see vPortEndScheduler () in port. c for the PC port).  This
++ * performs hardware specific operations such as stopping the kernel tick.
++ *
++ * vTaskEndScheduler () will cause all of the resources allocated by the
++ * kernel to be freed - but will not free resources allocated by application
++ * tasks.
++ *
++ * Example usage:
++ * @code{c}
++ * void vTaskCode( void * pvParameters )
++ * {
++ *   for( ;; )
++ *   {
++ *       // Task code goes here.
++ *
++ *       // At some point we want to end the real time kernel processing
++ *       // so call ...
++ *       vTaskEndScheduler ();
++ *   }
++ * }
++ *
++ * void vAFunction( void )
++ * {
++ *   // Create at least one task before starting the kernel.
++ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, NULL );
++ *
++ *   // Start the real time kernel with preemption.
++ *   vTaskStartScheduler ();
++ *
++ *   // Will only get here when the vTaskCode () task has called
++ *   // vTaskEndScheduler ().  When we get here we are back to single task
++ *   // execution.
++ * }
++ * @endcode
++ *
++ * \defgroup vTaskEndScheduler vTaskEndScheduler
++ * \ingroup SchedulerControl
++ */
++void vTaskEndScheduler( void );
++
++/**
++ * task. h
++ * @code{c}
++ * void vTaskSuspendAll( void );
++ * @endcode
++ *
++ * Suspends the scheduler without disabling interrupts.  Context switches will
++ * not occur while the scheduler is suspended.
++ *
++ * After calling vTaskSuspendAll () the calling task will continue to execute
++ * without risk of being swapped out until a call to xTaskResumeAll () has been
++ * made.
++ *
++ * API functions that have the potential to cause a context switch (for example,
++ * xTaskDelayUntil(), xQueueSend(), etc.) must not be called while the scheduler
++ * is suspended.
++ *
++ * Example usage:
++ * @code{c}
++ * void vTask1( void * pvParameters )
++ * {
++ *   for( ;; )
++ *   {
++ *       // Task code goes here.
++ *
++ *       // ...
++ *
++ *       // At some point the task wants to perform a long operation during
++ *       // which it does not want to get swapped out.  It cannot use
++ *       // taskENTER_CRITICAL ()/taskEXIT_CRITICAL () as the length of the
++ *       // operation may cause interrupts to be missed - including the
++ *       // ticks.
++ *
++ *       // Prevent the real time kernel swapping out the task.
++ *       vTaskSuspendAll ();
++ *
++ *       // Perform the operation here.  There is no need to use critical
++ *       // sections as we have all the microcontroller processing time.
++ *       // During this time interrupts will still operate and the kernel
++ *       // tick count will be maintained.
++ *
++ *       // ...
++ *
++ *       // The operation is complete.  Restart the kernel.
++ *       xTaskResumeAll ();
++ *   }
++ * }
++ * @endcode
++ * \defgroup vTaskSuspendAll vTaskSuspendAll
++ * \ingroup SchedulerControl
++ */
++void vTaskSuspendAll( void );
++
++/**
++ * task. h
++ * @code{c}
++ * BaseType_t xTaskResumeAll( void );
++ * @endcode
++ *
++ * Resumes scheduler activity after it was suspended by a call to
++ * vTaskSuspendAll().
++ *
++ * xTaskResumeAll() only resumes the scheduler.  It does not unsuspend tasks
++ * that were previously suspended by a call to vTaskSuspend().
++ *
++ * @return If resuming the scheduler caused a context switch then pdTRUE is
++ *         returned, otherwise pdFALSE is returned.
++ *
++ * Example usage:
++ * @code{c}
++ * void vTask1( void * pvParameters )
++ * {
++ *   for( ;; )
++ *   {
++ *       // Task code goes here.
++ *
++ *       // ...
++ *
++ *       // At some point the task wants to perform a long operation during
++ *       // which it does not want to get swapped out.  It cannot use
++ *       // taskENTER_CRITICAL ()/taskEXIT_CRITICAL () as the length of the
++ *       // operation may cause interrupts to be missed - including the
++ *       // ticks.
++ *
++ *       // Prevent the real time kernel swapping out the task.
++ *       vTaskSuspendAll ();
++ *
++ *       // Perform the operation here.  There is no need to use critical
++ *       // sections as we have all the microcontroller processing time.
++ *       // During this time interrupts will still operate and the real
++ *       // time kernel tick count will be maintained.
++ *
++ *       // ...
++ *
++ *       // The operation is complete.  Restart the kernel.  We want to force
++ *       // a context switch - but there is no point if resuming the scheduler
++ *       // caused a context switch already.
++ *       if( !xTaskResumeAll () )
++ *       {
++ *            taskYIELD ();
++ *       }
++ *   }
++ * }
++ * @endcode
++ * \defgroup xTaskResumeAll xTaskResumeAll
++ * \ingroup SchedulerControl
++ */
++BaseType_t xTaskResumeAll( void );
++
++/*-----------------------------------------------------------
++* TASK UTILITIES
++*----------------------------------------------------------*/
++
++/**
++ * task. h
++ * @code{c}
++ * TickType_t xTaskGetTickCount( void );
++ * @endcode
++ *
++ * @return The count of ticks since vTaskStartScheduler was called.
++ *
++ * \defgroup xTaskGetTickCount xTaskGetTickCount
++ * \ingroup TaskUtils
++ */
++TickType_t xTaskGetTickCount( void );
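++
++/*
++ * Illustrative sketch (not part of the original FreeRTOS header): measuring
++ * elapsed time with the tick count.  prvDoWork() is hypothetical; the
++ * conversion to milliseconds assumes the usual portTICK_PERIOD_MS definition.
++ * @code{c}
++ * void vTimeSomething( void )
++ * {
++ *     TickType_t xStart = xTaskGetTickCount();
++ *
++ *     prvDoWork();
++ *
++ *     // Elapsed time in milliseconds, with one tick period of resolution.
++ *     TickType_t xElapsedMs = ( xTaskGetTickCount() - xStart ) * portTICK_PERIOD_MS;
++ *     ( void ) xElapsedMs;
++ * }
++ * @endcode
++ */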
++
++/**
++ * task. h
++ * @code{c}
++ * TickType_t xTaskGetTickCountFromISR( void );
++ * @endcode
++ *
++ * @return The count of ticks since vTaskStartScheduler was called.
++ *
++ * This is a version of xTaskGetTickCount() that is safe to be called from an
++ * ISR - provided that TickType_t is the natural word size of the
++ * microcontroller being used or interrupt nesting is either not supported or
++ * not being used.
++ *
++ * \defgroup xTaskGetTickCountFromISR xTaskGetTickCountFromISR
++ * \ingroup TaskUtils
++ */
++TickType_t xTaskGetTickCountFromISR( void );
++
++/**
++ * task. h
++ * @code{c}
++ * uint16_t uxTaskGetNumberOfTasks( void );
++ * @endcode
++ *
++ * @return The number of tasks that the real time kernel is currently managing.
++ * This includes all ready, blocked and suspended tasks.  A task that
++ * has been deleted but not yet freed by the idle task will also be
++ * included in the count.
++ *
++ * \defgroup uxTaskGetNumberOfTasks uxTaskGetNumberOfTasks
++ * \ingroup TaskUtils
++ */
++UBaseType_t uxTaskGetNumberOfTasks( void );
++
++/**
++ * task. h
++ * @code{c}
++ * char *pcTaskGetName( TaskHandle_t xTaskToQuery );
++ * @endcode
++ *
++ * @return The text (human readable) name of the task referenced by the handle
++ * xTaskToQuery.  A task can query its own name by either passing in its own
++ * handle, or by setting xTaskToQuery to NULL.
++ *
++ * \defgroup pcTaskGetName pcTaskGetName
++ * \ingroup TaskUtils
++ */
++char * pcTaskGetName( TaskHandle_t xTaskToQuery ); /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
++
++/**
++ * task. h
++ * @code{c}
++ * TaskHandle_t xTaskGetHandle( const char *pcNameToQuery );
++ * @endcode
++ *
++ * NOTE:  This function takes a relatively long time to complete and should be
++ * used sparingly.
++ *
++ * @return The handle of the task that has the human readable name pcNameToQuery.
++ * NULL is returned if no matching name is found.  INCLUDE_xTaskGetHandle
++ * must be set to 1 in FreeRTOSConfig.h for xTaskGetHandle() to be available.
++ *
++ * \defgroup pcTaskGetHandle pcTaskGetHandle
++ * \ingroup TaskUtils
++ */
++TaskHandle_t xTaskGetHandle( const char * pcNameToQuery ); /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
++
++/**
++ * task.h
++ * @code{c}
++ * UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask );
++ * @endcode
++ *
++ * INCLUDE_uxTaskGetStackHighWaterMark must be set to 1 in FreeRTOSConfig.h for
++ * this function to be available.
++ *
++ * Returns the high water mark of the stack associated with xTask.  That is,
++ * the minimum free stack space there has been (in words, so on a 32 bit machine
++ * a value of 1 means 4 bytes) since the task started.  The smaller the returned
++ * number the closer the task has come to overflowing its stack.
++ *
++ * uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
++ * same except for their return type.  Using configSTACK_DEPTH_TYPE allows the
++ * user to determine the return type.  It gets around the problem of the value
++ * overflowing on 8-bit types without breaking backward compatibility for
++ * applications that expect an 8-bit return type.
++ *
++ * @param xTask Handle of the task associated with the stack to be checked.
++ * Set xTask to NULL to check the stack of the calling task.
++ *
++ * @return The smallest amount of free stack space there has been (in words, so
++ * actual spaces on the stack rather than bytes) since the task referenced by
++ * xTask was created.
++ */
++UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask );
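++
++/*
++ * Illustrative sketch (not part of the original FreeRTOS header): a task
++ * periodically checking its own remaining stack headroom.  The 20-word
++ * threshold is an arbitrary example value.
++ * @code{c}
++ * void vSelfCheckingTask( void * pvParameters )
++ * {
++ *     for( ;; )
++ *     {
++ *         // Task code goes here.
++ *
++ *         // The high water mark is reported in words, not bytes.
++ *         if( uxTaskGetStackHighWaterMark( NULL ) < 20 )
++ *         {
++ *             // Getting close to a stack overflow - take action here.
++ *         }
++ *
++ *         vTaskDelay( pdMS_TO_TICKS( 1000 ) );
++ *     }
++ * }
++ * @endcode
++ */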
++
++/**
++ * task.h
++ * @code{c}
++ * configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask );
++ * @endcode
++ *
++ * INCLUDE_uxTaskGetStackHighWaterMark2 must be set to 1 in FreeRTOSConfig.h for
++ * this function to be available.
++ *
++ * Returns the high water mark of the stack associated with xTask.  That is,
++ * the minimum free stack space there has been (in words, so on a 32 bit machine
++ * a value of 1 means 4 bytes) since the task started.  The smaller the returned
++ * number the closer the task has come to overflowing its stack.
++ *
++ * uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
++ * same except for their return type.  Using configSTACK_DEPTH_TYPE allows the
++ * user to determine the return type.  It gets around the problem of the value
++ * overflowing on 8-bit types without breaking backward compatibility for
++ * applications that expect an 8-bit return type.
++ *
++ * @param xTask Handle of the task associated with the stack to be checked.
++ * Set xTask to NULL to check the stack of the calling task.
++ *
++ * @return The smallest amount of free stack space there has been (in words, so
++ * actual spaces on the stack rather than bytes) since the task referenced by
++ * xTask was created.
++ */
++configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask );
++
++/* When using trace macros it is sometimes necessary to include task.h before
++ * FreeRTOS.h.  When this is done TaskHookFunction_t will not yet have been defined,
++ * so the following two prototypes will cause a compilation error.  This can be
++ * fixed by simply guarding against the inclusion of these two prototypes unless
++ * they are explicitly required by the configUSE_APPLICATION_TASK_TAG configuration
++ * constant. */
++#ifdef configUSE_APPLICATION_TASK_TAG
++    #if configUSE_APPLICATION_TASK_TAG == 1
++
++/**
++ * task.h
++ * @code{c}
++ * void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction );
++ * @endcode
++ *
++ * Sets pxHookFunction to be the task hook function used by the task xTask.
++ * Passing xTask as NULL has the effect of setting the calling task's hook
++ * function.
++ */
++        void vTaskSetApplicationTaskTag( TaskHandle_t xTask,
++                                         TaskHookFunction_t pxHookFunction );
++
++/**
++ * task.h
++ * @code{c}
++ * void xTaskGetApplicationTaskTag( TaskHandle_t xTask );
++ * @endcode
++ *
++ * Returns the pxHookFunction value assigned to the task xTask.  Do not
++ * call from an interrupt service routine - call
++ * xTaskGetApplicationTaskTagFromISR() instead.
++ */
++        TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask );
++
++/**
++ * task.h
++ * @code{c}
++ * void xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask );
++ * @endcode
++ *
++ * Returns the pxHookFunction value assigned to the task xTask.  Can
++ * be called from an interrupt service routine.
++ */
++        TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask );
++    #endif /* configUSE_APPLICATION_TASK_TAG ==1 */
++#endif /* ifdef configUSE_APPLICATION_TASK_TAG */
++
++/**
++ * task.h
++ * @code{c}
++ * BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter );
++ * @endcode
++ *
++ * Calls the hook function associated with xTask.  Passing xTask as NULL has
++ * the effect of calling the running task's (the calling task's) hook function.
++ *
++ * pvParameter is passed to the hook function for the task to interpret as it
++ * wants.  The return value is the value returned by the task hook function
++ * registered by the user.
++ */
++BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask,
++                                         void * pvParameter );
++
++/**
++ * xTaskGetIdleTaskHandle() is only available if
++ * INCLUDE_xTaskGetIdleTaskHandle is set to 1 in FreeRTOSConfig.h.
++ *
++ * Simply returns the handle of the idle task.  It is not valid to call
++ * xTaskGetIdleTaskHandle() before the scheduler has been started.
++ */
++TaskHandle_t xTaskGetIdleTaskHandle( void );
++
++/**
++ * task. h
++ * @code{c}
++ * BaseType_t xTaskNotifyIndexed( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction );
++ * BaseType_t xTaskNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction );
++ * @endcode
++ *
++ * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
++ *
++ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for these
++ * functions to be available.
++ *
++ * Sends a direct to task notification to a task, with an optional value and
++ * action.
++ *
++ * Each task has a private array of "notification values" (or 'notifications'),
++ * each of which is a 32-bit unsigned integer (uint32_t).  The constant
++ * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the
++ * array, and (for backward compatibility) defaults to 1 if left undefined.
++ * Prior to FreeRTOS V10.4.0 there was only one notification value per task.
++ *
++ * Events can be sent to a task using an intermediary object.  Examples of such
++ * objects are queues, semaphores, mutexes and event groups.  Task notifications
++ * are a method of sending an event directly to a task without the need for such
++ * an intermediary object.
++ *
++ * A notification sent to a task can optionally perform an action, such as
++ * update, overwrite or increment one of the task's notification values.  In
++ * that way task notifications can be used to send data to a task, or be used as
++ * light weight and fast binary or counting semaphores.
++ *
++ * A task can use xTaskNotifyWaitIndexed() or ulTaskNotifyTakeIndexed() to
++ * [optionally] block to wait for a notification to be pending.  The task does
++ * not consume any CPU time while it is in the Blocked state.
++ *
++ * A notification sent to a task will remain pending until it is cleared by the
++ * task calling xTaskNotifyWaitIndexed() or ulTaskNotifyTakeIndexed() (or their
++ * un-indexed equivalents).  If the task was already in the Blocked state to
++ * wait for a notification when the notification arrives then the task will
++ * automatically be removed from the Blocked state (unblocked) and the
++ * notification cleared.
++ *
++ * **NOTE** Each notification within the array operates independently - a task
++ * can only block on one notification within the array at a time and will not be
++ * unblocked by a notification sent to any other array index.
++ *
++ * Backward compatibility information:
++ * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and
++ * all task notification API functions operated on that value. Replacing the
++ * single notification value with an array of notification values necessitated a
++ * new set of API functions that could address specific notifications within the
++ * array.  xTaskNotify() is the original API function, and remains backward
++ * compatible by always operating on the notification value at index 0 in the
++ * array. Calling xTaskNotify() is equivalent to calling xTaskNotifyIndexed()
++ * with the uxIndexToNotify parameter set to 0.
++ *
++ * @param xTaskToNotify The handle of the task being notified.  The handle to a
++ * task can be returned from the xTaskCreate() API function used to create the
++ * task, and the handle of the currently running task can be obtained by calling
++ * xTaskGetCurrentTaskHandle().
++ *
++ * @param uxIndexToNotify The index within the target task's array of
++ * notification values to which the notification is to be sent.  uxIndexToNotify
++ * must be less than configTASK_NOTIFICATION_ARRAY_ENTRIES.  xTaskNotify() does
++ * not have this parameter and always sends notifications to index 0.
++ *
++ * @param ulValue Data that can be sent with the notification.  How the data is
++ * used depends on the value of the eAction parameter.
++ *
++ * @param eAction Specifies how the notification updates the task's notification
++ * value, if at all.  Valid values for eAction are as follows:
++ *
++ * eSetBits -
++ * The target notification value is bitwise ORed with ulValue.
++ * xTaskNotifyIndexed() always returns pdPASS in this case.
++ *
++ * eIncrement -
++ * The target notification value is incremented.  ulValue is not used and
++ * xTaskNotifyIndexed() always returns pdPASS in this case.
++ *
++ * eSetValueWithOverwrite -
++ * The target notification value is set to the value of ulValue, even if the
++ * task being notified had not yet processed the previous notification at the
++ * same array index (the task already had a notification pending at that index).
++ * xTaskNotifyIndexed() always returns pdPASS in this case.
++ *
++ * eSetValueWithoutOverwrite -
++ * If the task being notified did not already have a notification pending at the
++ * same array index then the target notification value is set to ulValue and
++ * xTaskNotifyIndexed() will return pdPASS.  If the task being notified already
++ * had a notification pending at the same array index then no action is
++ * performed and pdFAIL is returned.
++ *
++ * eNoAction -
++ * The task receives a notification at the specified array index without the
++ * notification value at that index being updated.  ulValue is not used and
++ * xTaskNotifyIndexed() always returns pdPASS in this case.
++ *
++ * pulPreviousNotificationValue -
++ * Can be used to pass out the subject task's notification value before any
++ * bits are modified by the notify function.
++ *
++ * @return Dependent on the value of eAction.  See the description of the
++ * eAction parameter.
++ *
++ * \defgroup xTaskNotifyIndexed xTaskNotifyIndexed
++ * \ingroup TaskNotifications
++ */
++BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
++                               UBaseType_t uxIndexToNotify,
++                               uint32_t ulValue,
++                               eNotifyAction eAction,
++                               uint32_t * pulPreviousNotificationValue );
++#define xTaskNotify( xTaskToNotify, ulValue, eAction ) \
++    xTaskGenericNotify( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( ulValue ), ( eAction ), NULL )
++#define xTaskNotifyIndexed( xTaskToNotify, uxIndexToNotify, ulValue, eAction ) \
++    xTaskGenericNotify( ( xTaskToNotify ), ( uxIndexToNotify ), ( ulValue ), ( eAction ), NULL )
++
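++/* Example usage (editor's illustrative sketch, not part of the upstream
++ * FreeRTOS sources; xOtherTask is a hypothetical handle obtained from
++ * xTaskCreate(), and index 1 assumes configTASK_NOTIFICATION_ARRAY_ENTRIES
++ * is at least 2):
++ *
++ *    // Set bit 0 in notification index 1 of xOtherTask.  With eSetBits the
++ *    // call always returns pdPASS.
++ *    xTaskNotifyIndexed( xOtherTask, 1, 0x01, eSetBits );
++ *
++ *    // Equivalent legacy call operating on the backward compatible index 0.
++ *    xTaskNotify( xOtherTask, 0x01, eSetBits );
++ */
++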
++/**
++ * task.h
++ * @code{c}
++ * BaseType_t xTaskNotifyAndQueryIndexed( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotifyValue );
++ * BaseType_t xTaskNotifyAndQuery( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotifyValue );
++ * @endcode
++ *
++ * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
++ *
++ * xTaskNotifyAndQueryIndexed() performs the same operation as
++ * xTaskNotifyIndexed() with the addition that it also returns the subject
++ * task's prior notification value (the notification value at the time the
++ * function is called rather than when the function returns) in the additional
++ * pulPreviousNotifyValue parameter.
++ *
++ * xTaskNotifyAndQuery() performs the same operation as xTaskNotify() with the
++ * addition that it also returns the subject task's prior notification value
++ * (the notification value as it was at the time the function is called, rather
++ * than when the function returns) in the additional pulPreviousNotifyValue
++ * parameter.
++ *
++ * \defgroup xTaskNotifyAndQueryIndexed xTaskNotifyAndQueryIndexed
++ * \ingroup TaskNotifications
++ */
++#define xTaskNotifyAndQuery( xTaskToNotify, ulValue, eAction, pulPreviousNotifyValue ) \
++    xTaskGenericNotify( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( ulValue ), ( eAction ), ( pulPreviousNotifyValue ) )
++#define xTaskNotifyAndQueryIndexed( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotifyValue ) \
++    xTaskGenericNotify( ( xTaskToNotify ), ( uxIndexToNotify ), ( ulValue ), ( eAction ), ( pulPreviousNotifyValue ) )
++
++/**
++ * task.h
++ * @code{c}
++ * BaseType_t xTaskNotifyIndexedFromISR( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken );
++ * BaseType_t xTaskNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken );
++ * @endcode
++ *
++ * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
++ *
++ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for these
++ * functions to be available.
++ *
++ * A version of xTaskNotifyIndexed() that can be used from an interrupt service
++ * routine (ISR).
++ *
++ * Each task has a private array of "notification values" (or 'notifications'),
++ * each of which is a 32-bit unsigned integer (uint32_t).  The constant
++ * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the
++ * array, and (for backward compatibility) defaults to 1 if left undefined.
++ * Prior to FreeRTOS V10.4.0 there was only one notification value per task.
++ *
++ * Events can be sent to a task using an intermediary object.  Examples of such
++ * objects are queues, semaphores, mutexes and event groups.  Task notifications
++ * are a method of sending an event directly to a task without the need for such
++ * an intermediary object.
++ *
++ * A notification sent to a task can optionally perform an action, such as
++ * update, overwrite or increment one of the task's notification values.  In
++ * that way task notifications can be used to send data to a task, or be used as
++ * light weight and fast binary or counting semaphores.
++ *
++ * A task can use xTaskNotifyWaitIndexed() to [optionally] block to wait for a
++ * notification to be pending, or ulTaskNotifyTakeIndexed() to [optionally] block
++ * to wait for a notification value to have a non-zero value.  The task does
++ * not consume any CPU time while it is in the Blocked state.
++ *
++ * A notification sent to a task will remain pending until it is cleared by the
++ * task calling xTaskNotifyWaitIndexed() or ulTaskNotifyTakeIndexed() (or their
++ * un-indexed equivalents).  If the task was already in the Blocked state to
++ * wait for a notification when the notification arrives then the task will
++ * automatically be removed from the Blocked state (unblocked) and the
++ * notification cleared.
++ *
++ * **NOTE** Each notification within the array operates independently - a task
++ * can only block on one notification within the array at a time and will not be
++ * unblocked by a notification sent to any other array index.
++ *
++ * Backward compatibility information:
++ * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and
++ * all task notification API functions operated on that value. Replacing the
++ * single notification value with an array of notification values necessitated a
++ * new set of API functions that could address specific notifications within the
++ * array.  xTaskNotifyFromISR() is the original API function, and remains
++ * backward compatible by always operating on the notification value at index 0
++ * within the array. Calling xTaskNotifyFromISR() is equivalent to calling
++ * xTaskNotifyIndexedFromISR() with the uxIndexToNotify parameter set to 0.
++ *
++ * @param uxIndexToNotify The index within the target task's array of
++ * notification values to which the notification is to be sent.  uxIndexToNotify
++ * must be less than configTASK_NOTIFICATION_ARRAY_ENTRIES.  xTaskNotifyFromISR()
++ * does not have this parameter and always sends notifications to index 0.
++ *
++ * @param xTaskToNotify The handle of the task being notified.  The handle to a
++ * task can be returned from the xTaskCreate() API function used to create the
++ * task, and the handle of the currently running task can be obtained by calling
++ * xTaskGetCurrentTaskHandle().
++ *
++ * @param ulValue Data that can be sent with the notification.  How the data is
++ * used depends on the value of the eAction parameter.
++ *
++ * @param eAction Specifies how the notification updates the task's notification
++ * value, if at all.  Valid values for eAction are as follows:
++ *
++ * eSetBits -
++ * The task's notification value is bitwise ORed with ulValue.  xTaskNotify()
++ * always returns pdPASS in this case.
++ *
++ * eIncrement -
++ * The task's notification value is incremented.  ulValue is not used and
++ * xTaskNotify() always returns pdPASS in this case.
++ *
++ * eSetValueWithOverwrite -
++ * The task's notification value is set to the value of ulValue, even if the
++ * task being notified had not yet processed the previous notification (the
++ * task already had a notification pending).  xTaskNotify() always returns
++ * pdPASS in this case.
++ *
++ * eSetValueWithoutOverwrite -
++ * If the task being notified did not already have a notification pending then
++ * the task's notification value is set to ulValue and xTaskNotify() will
++ * return pdPASS.  If the task being notified already had a notification
++ * pending then no action is performed and pdFAIL is returned.
++ *
++ * eNoAction -
++ * The task receives a notification without its notification value being
++ * updated.  ulValue is not used and xTaskNotify() always returns pdPASS in
++ * this case.
++ *
++ * @param pxHigherPriorityTaskWoken  xTaskNotifyFromISR() will set
++ * *pxHigherPriorityTaskWoken to pdTRUE if sending the notification caused the
++ * task to which the notification was sent to leave the Blocked state, and the
++ * unblocked task has a priority higher than the currently running task.  If
++ * xTaskNotifyFromISR() sets this value to pdTRUE then a context switch should
++ * be requested before the interrupt is exited.  How a context switch is
++ * requested from an ISR is dependent on the port - see the documentation page
++ * for the port in use.
++ *
++ * @return Dependent on the value of eAction.  See the description of the
++ * eAction parameter.
++ *
++ * \defgroup xTaskNotifyIndexedFromISR xTaskNotifyIndexedFromISR
++ * \ingroup TaskNotifications
++ */
++BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
++                                      UBaseType_t uxIndexToNotify,
++                                      uint32_t ulValue,
++                                      eNotifyAction eAction,
++                                      uint32_t * pulPreviousNotificationValue,
++                                      BaseType_t * pxHigherPriorityTaskWoken );
++#define xTaskNotifyFromISR( xTaskToNotify, ulValue, eAction, pxHigherPriorityTaskWoken ) \
++    xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( ulValue ), ( eAction ), NULL, ( pxHigherPriorityTaskWoken ) )
++#define xTaskNotifyIndexedFromISR( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pxHigherPriorityTaskWoken ) \
++    xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( uxIndexToNotify ), ( ulValue ), ( eAction ), NULL, ( pxHigherPriorityTaskWoken ) )
++
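++/* Example usage (editor's illustrative sketch, not part of the upstream
++ * FreeRTOS sources; vExampleISR and xHandlingTask are hypothetical, and the
++ * yield macro is port dependent):
++ *
++ *    void vExampleISR( void )
++ *    {
++ *    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
++ *
++ *        // Set bit 0 of the handling task's notification value at index 0.
++ *        xTaskNotifyFromISR( xHandlingTask, 0x01, eSetBits,
++ *                            &xHigherPriorityTaskWoken );
++ *
++ *        // Request a context switch before exiting the ISR if a higher
++ *        // priority task was unblocked.
++ *        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
++ *    }
++ */
++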
++/**
++ * task.h
++ * @code{c}
++ * BaseType_t xTaskNotifyAndQueryIndexedFromISR( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken );
++ * BaseType_t xTaskNotifyAndQueryFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken );
++ * @endcode
++ *
++ * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
++ *
++ * xTaskNotifyAndQueryIndexedFromISR() performs the same operation as
++ * xTaskNotifyIndexedFromISR() with the addition that it also returns the
++ * subject task's prior notification value (the notification value at the time
++ * the function is called rather than at the time the function returns) in the
++ * additional pulPreviousNotifyValue parameter.
++ *
++ * xTaskNotifyAndQueryFromISR() performs the same operation as
++ * xTaskNotifyFromISR() with the addition that it also returns the subject
++ * task's prior notification value (the notification value at the time the
++ * function is called rather than at the time the function returns) in the
++ * additional pulPreviousNotifyValue parameter.
++ *
++ * \defgroup xTaskNotifyAndQueryIndexedFromISR xTaskNotifyAndQueryIndexedFromISR
++ * \ingroup TaskNotifications
++ */
++#define xTaskNotifyAndQueryIndexedFromISR( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken ) \
++    xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( uxIndexToNotify ), ( ulValue ), ( eAction ), ( pulPreviousNotificationValue ), ( pxHigherPriorityTaskWoken ) )
++#define xTaskNotifyAndQueryFromISR( xTaskToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken ) \
++    xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( ulValue ), ( eAction ), ( pulPreviousNotificationValue ), ( pxHigherPriorityTaskWoken ) )
++
++/**
++ * task.h
++ * @code{c}
++ * BaseType_t xTaskNotifyWaitIndexed( UBaseType_t uxIndexToWaitOn, uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait );
++ *
++ * BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait );
++ * @endcode
++ *
++ * Waits for a direct to task notification to be pending at a given index within
++ * an array of direct to task notifications.
++ *
++ * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
++ *
++ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this
++ * function to be available.
++ *
++ * Each task has a private array of "notification values" (or 'notifications'),
++ * each of which is a 32-bit unsigned integer (uint32_t).  The constant
++ * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the
++ * array, and (for backward compatibility) defaults to 1 if left undefined.
++ * Prior to FreeRTOS V10.4.0 there was only one notification value per task.
++ *
++ * Events can be sent to a task using an intermediary object.  Examples of such
++ * objects are queues, semaphores, mutexes and event groups.  Task notifications
++ * are a method of sending an event directly to a task without the need for such
++ * an intermediary object.
++ *
++ * A notification sent to a task can optionally perform an action, such as
++ * update, overwrite or increment one of the task's notification values.  In
++ * that way task notifications can be used to send data to a task, or be used as
++ * light weight and fast binary or counting semaphores.
++ *
++ * A notification sent to a task will remain pending until it is cleared by the
++ * task calling xTaskNotifyWaitIndexed() or ulTaskNotifyTakeIndexed() (or their
++ * un-indexed equivalents).  If the task was already in the Blocked state to
++ * wait for a notification when the notification arrives then the task will
++ * automatically be removed from the Blocked state (unblocked) and the
++ * notification cleared.
++ *
++ * A task can use xTaskNotifyWaitIndexed() to [optionally] block to wait for a
++ * notification to be pending, or ulTaskNotifyTakeIndexed() to [optionally] block
++ * to wait for a notification value to have a non-zero value.  The task does
++ * not consume any CPU time while it is in the Blocked state.
++ *
++ * **NOTE** Each notification within the array operates independently - a task
++ * can only block on one notification within the array at a time and will not be
++ * unblocked by a notification sent to any other array index.
++ *
++ * Backward compatibility information:
++ * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and
++ * all task notification API functions operated on that value. Replacing the
++ * single notification value with an array of notification values necessitated a
++ * new set of API functions that could address specific notifications within the
++ * array.  xTaskNotifyWait() is the original API function, and remains backward
++ * compatible by always operating on the notification value at index 0 in the
++ * array. Calling xTaskNotifyWait() is equivalent to calling
++ * xTaskNotifyWaitIndexed() with the uxIndexToWaitOn parameter set to 0.
++ *
++ * @param uxIndexToWaitOn The index within the calling task's array of
++ * notification values on which the calling task will wait for a notification to
++ * be received.  uxIndexToWaitOn must be less than
++ * configTASK_NOTIFICATION_ARRAY_ENTRIES.  xTaskNotifyWait() does
++ * not have this parameter and always waits for notifications on index 0.
++ *
++ * @param ulBitsToClearOnEntry Bits that are set in ulBitsToClearOnEntry value
++ * will be cleared in the calling task's notification value before the task
++ * checks to see if any notifications are pending, and optionally blocks if no
++ * notifications are pending.  Setting ulBitsToClearOnEntry to ULONG_MAX (if
++ * limits.h is included) or 0xffffffffUL (if limits.h is not included) will have
++ * the effect of resetting the task's notification value to 0.  Setting
++ * ulBitsToClearOnEntry to 0 will leave the task's notification value unchanged.
++ *
++ * @param ulBitsToClearOnExit If a notification is pending or received before
++ * the calling task exits the xTaskNotifyWait() function then the task's
++ * notification value (see the xTaskNotify() API function) is passed out using
++ * the pulNotificationValue parameter.  Then any bits that are set in
++ * ulBitsToClearOnExit will be cleared in the task's notification value (note
++ * *pulNotificationValue is set before any bits are cleared).  Setting
++ * ulBitsToClearOnExit to ULONG_MAX (if limits.h is included) or 0xffffffffUL
++ * (if limits.h is not included) will have the effect of resetting the task's
++ * notification value to 0 before the function exits.  Setting
++ * ulBitsToClearOnExit to 0 will leave the task's notification value unchanged
++ * when the function exits (in which case the value passed out in
++ * pulNotificationValue will match the task's notification value).
++ *
++ * @param pulNotificationValue Used to pass the task's notification value out
++ * of the function.  Note the value passed out will not be affected by the
++ * clearing of any bits caused by ulBitsToClearOnExit being non-zero.
++ *
++ * @param xTicksToWait The maximum amount of time that the task should wait in
++ * the Blocked state for a notification to be received, should a notification
++ * not already be pending when xTaskNotifyWait() was called.  The task
++ * will not consume any processing time while it is in the Blocked state.  This
++ * is specified in kernel ticks, the macro pdMS_TO_TICKS( value_in_ms ) can be
++ * used to convert a time specified in milliseconds to a time specified in
++ * ticks.
++ *
++ * @return If a notification was received (including notifications that were
++ * already pending when xTaskNotifyWait was called) then pdPASS is
++ * returned.  Otherwise pdFAIL is returned.
++ *
++ * \defgroup xTaskNotifyWaitIndexed xTaskNotifyWaitIndexed
++ * \ingroup TaskNotifications
++ */
++BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
++                                   uint32_t ulBitsToClearOnEntry,
++                                   uint32_t ulBitsToClearOnExit,
++                                   uint32_t * pulNotificationValue,
++                                   TickType_t xTicksToWait );
++#define xTaskNotifyWait( ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait ) \
++    xTaskGenericNotifyWait( tskDEFAULT_INDEX_TO_NOTIFY, ( ulBitsToClearOnEntry ), ( ulBitsToClearOnExit ), ( pulNotificationValue ), ( xTicksToWait ) )
++#define xTaskNotifyWaitIndexed( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait ) \
++    xTaskGenericNotifyWait( ( uxIndexToWaitOn ), ( ulBitsToClearOnEntry ), ( ulBitsToClearOnExit ), ( pulNotificationValue ), ( xTicksToWait ) )
++
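++/* Example usage (editor's illustrative sketch, not part of the upstream
++ * FreeRTOS sources):
++ *
++ *    uint32_t ulNotifiedValue;
++ *
++ *    // Block for up to 500ms waiting for a notification at index 0.  No bits
++ *    // are cleared on entry and all 32 bits are cleared on exit.
++ *    if( xTaskNotifyWait( 0x00, 0xffffffffUL, &ulNotifiedValue,
++ *                         pdMS_TO_TICKS( 500 ) ) == pdPASS )
++ *    {
++ *        // ulNotifiedValue holds the bits set by the notifying task or ISR.
++ *    }
++ */
++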
++/**
++ * task.h
++ * @code{c}
++ * BaseType_t xTaskNotifyGiveIndexed( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify );
++ * BaseType_t xTaskNotifyGive( TaskHandle_t xTaskToNotify );
++ * @endcode
++ *
++ * Sends a direct to task notification to a particular index in the target
++ * task's notification array in a manner similar to giving a counting semaphore.
++ *
++ * See https://www.FreeRTOS.org/RTOS-task-notifications.html for more details.
++ *
++ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for these
++ * macros to be available.
++ *
++ * Each task has a private array of "notification values" (or 'notifications'),
++ * each of which is a 32-bit unsigned integer (uint32_t).  The constant
++ * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the
++ * array, and (for backward compatibility) defaults to 1 if left undefined.
++ * Prior to FreeRTOS V10.4.0 there was only one notification value per task.
++ *
++ * Events can be sent to a task using an intermediary object.  Examples of such
++ * objects are queues, semaphores, mutexes and event groups.  Task notifications
++ * are a method of sending an event directly to a task without the need for such
++ * an intermediary object.
++ *
++ * A notification sent to a task can optionally perform an action, such as
++ * update, overwrite or increment one of the task's notification values.  In
++ * that way task notifications can be used to send data to a task, or be used as
++ * light weight and fast binary or counting semaphores.
++ *
++ * xTaskNotifyGiveIndexed() is a helper macro intended for use when task
++ * notifications are used as light weight and faster binary or counting
++ * semaphore equivalents.  Actual FreeRTOS semaphores are given using the
++ * xSemaphoreGive() API function; the equivalent action that instead uses a task
++ * notification is xTaskNotifyGiveIndexed().
++ *
++ * When task notifications are being used as a binary or counting semaphore
++ * equivalent then the task being notified should wait for the notification
++ * using the ulTaskNotifyTakeIndexed() API function rather than the
++ * xTaskNotifyWaitIndexed() API function.
++ *
++ * **NOTE** Each notification within the array operates independently - a task
++ * can only block on one notification within the array at a time and will not be
++ * unblocked by a notification sent to any other array index.
++ *
++ * Backward compatibility information:
++ * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and
++ * all task notification API functions operated on that value. Replacing the
++ * single notification value with an array of notification values necessitated a
++ * new set of API functions that could address specific notifications within the
++ * array.  xTaskNotifyGive() is the original API function, and remains backward
++ * compatible by always operating on the notification value at index 0 in the
++ * array. Calling xTaskNotifyGive() is equivalent to calling
++ * xTaskNotifyGiveIndexed() with the uxIndexToNotify parameter set to 0.
++ *
++ * @param xTaskToNotify The handle of the task being notified.  The handle to a
++ * task can be returned from the xTaskCreate() API function used to create the
++ * task, and the handle of the currently running task can be obtained by calling
++ * xTaskGetCurrentTaskHandle().
++ *
++ * @param uxIndexToNotify The index within the target task's array of
++ * notification values to which the notification is to be sent.  uxIndexToNotify
++ * must be less than configTASK_NOTIFICATION_ARRAY_ENTRIES.  xTaskNotifyGive()
++ * does not have this parameter and always sends notifications to index 0.
++ *
++ * @return xTaskNotifyGive() is a macro that calls xTaskNotify() with the
++ * eAction parameter set to eIncrement - so pdPASS is always returned.
++ *
++ * \defgroup xTaskNotifyGiveIndexed xTaskNotifyGiveIndexed
++ * \ingroup TaskNotifications
++ */
++#define xTaskNotifyGive( xTaskToNotify ) \
++    xTaskGenericNotify( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( 0 ), eIncrement, NULL )
++#define xTaskNotifyGiveIndexed( xTaskToNotify, uxIndexToNotify ) \
++    xTaskGenericNotify( ( xTaskToNotify ), ( uxIndexToNotify ), ( 0 ), eIncrement, NULL )
++
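++/* Example usage (editor's illustrative sketch, not part of the upstream
++ * FreeRTOS sources; xWorkerTask is a hypothetical handle):
++ *
++ *    // Give the equivalent of a counting semaphore to xWorkerTask by
++ *    // incrementing its index 0 notification value.  The worker takes it
++ *    // with ulTaskNotifyTake(), documented further below.
++ *    xTaskNotifyGive( xWorkerTask );
++ */
++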
++/**
++ * task.h
++ * @code{c}
++ * void vTaskNotifyGiveIndexedFromISR( TaskHandle_t xTaskHandle, UBaseType_t uxIndexToNotify, BaseType_t *pxHigherPriorityTaskWoken );
++ * void vTaskNotifyGiveFromISR( TaskHandle_t xTaskHandle, BaseType_t *pxHigherPriorityTaskWoken );
++ * @endcode
++ *
++ * A version of xTaskNotifyGiveIndexed() that can be called from an interrupt
++ * service routine (ISR).
++ *
++ * See https://www.FreeRTOS.org/RTOS-task-notifications.html for more details.
++ *
++ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this macro
++ * to be available.
++ *
++ * Each task has a private array of "notification values" (or 'notifications'),
++ * each of which is a 32-bit unsigned integer (uint32_t).  The constant
++ * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the
++ * array, and (for backward compatibility) defaults to 1 if left undefined.
++ * Prior to FreeRTOS V10.4.0 there was only one notification value per task.
++ *
++ * Events can be sent to a task using an intermediary object.  Examples of such
++ * objects are queues, semaphores, mutexes and event groups.  Task notifications
++ * are a method of sending an event directly to a task without the need for such
++ * an intermediary object.
++ *
++ * A notification sent to a task can optionally perform an action, such as
++ * update, overwrite or increment one of the task's notification values.  In
++ * that way task notifications can be used to send data to a task, or be used as
++ * light weight and fast binary or counting semaphores.
++ *
++ * vTaskNotifyGiveIndexedFromISR() is intended for use when task notifications
++ * are used as light weight and faster binary or counting semaphore equivalents.
++ * Actual FreeRTOS semaphores are given from an ISR using the
++ * xSemaphoreGiveFromISR() API function; the equivalent action that instead uses
++ * a task notification is vTaskNotifyGiveIndexedFromISR().
++ *
++ * When task notifications are being used as a binary or counting semaphore
++ * equivalent then the task being notified should wait for the notification
++ * using the ulTaskNotifyTakeIndexed() API function rather than the
++ * xTaskNotifyWaitIndexed() API function.
++ *
++ * **NOTE** Each notification within the array operates independently - a task
++ * can only block on one notification within the array at a time and will not be
++ * unblocked by a notification sent to any other array index.
++ *
++ * Backward compatibility information:
++ * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and
++ * all task notification API functions operated on that value. Replacing the
++ * single notification value with an array of notification values necessitated a
++ * new set of API functions that could address specific notifications within the
++ * array.  vTaskNotifyGiveFromISR() is the original API function, and remains
++ * backward compatible by always operating on the notification value at index 0
++ * within the array. Calling vTaskNotifyGiveFromISR() is equivalent to calling
++ * vTaskNotifyGiveIndexedFromISR() with the uxIndexToNotify parameter set to 0.
++ *
++ * @param xTaskToNotify The handle of the task being notified.  The handle to a
++ * task can be returned from the xTaskCreate() API function used to create the
++ * task, and the handle of the currently running task can be obtained by calling
++ * xTaskGetCurrentTaskHandle().
++ *
++ * @param uxIndexToNotify The index within the target task's array of
++ * notification values to which the notification is to be sent.  uxIndexToNotify
++ * must be less than configTASK_NOTIFICATION_ARRAY_ENTRIES.
++ * xTaskNotifyGiveFromISR() does not have this parameter and always sends
++ * notifications to index 0.
++ *
++ * @param pxHigherPriorityTaskWoken  vTaskNotifyGiveFromISR() will set
++ * *pxHigherPriorityTaskWoken to pdTRUE if sending the notification caused the
++ * task to which the notification was sent to leave the Blocked state, and the
++ * unblocked task has a priority higher than the currently running task.  If
++ * vTaskNotifyGiveFromISR() sets this value to pdTRUE then a context switch
++ * should be requested before the interrupt is exited.  How a context switch is
++ * requested from an ISR is dependent on the port - see the documentation page
++ * for the port in use.
++ *
++ * \defgroup vTaskNotifyGiveIndexedFromISR vTaskNotifyGiveIndexedFromISR
++ * \ingroup TaskNotifications
++ */
++void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
++                                    UBaseType_t uxIndexToNotify,
++                                    BaseType_t * pxHigherPriorityTaskWoken );
++#define vTaskNotifyGiveFromISR( xTaskToNotify, pxHigherPriorityTaskWoken ) \
++    vTaskGenericNotifyGiveFromISR( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( pxHigherPriorityTaskWoken ) );
++#define vTaskNotifyGiveIndexedFromISR( xTaskToNotify, uxIndexToNotify, pxHigherPriorityTaskWoken ) \
++    vTaskGenericNotifyGiveFromISR( ( xTaskToNotify ), ( uxIndexToNotify ), ( pxHigherPriorityTaskWoken ) );
++
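++/* Example usage (editor's illustrative sketch, not part of the upstream
++ * FreeRTOS sources; the ISR name and xWorkerTask handle are hypothetical, and
++ * the yield macro is port dependent):
++ *
++ *    void vRxCompleteISR( void )
++ *    {
++ *    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
++ *
++ *        // Unblock the worker task that is waiting in ulTaskNotifyTake().
++ *        vTaskNotifyGiveFromISR( xWorkerTask, &xHigherPriorityTaskWoken );
++ *
++ *        // Yield if the unblocked worker has a higher priority.
++ *        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
++ *    }
++ */
++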
++/**
++ * task.h
++ * @code{c}
++ * uint32_t ulTaskNotifyTakeIndexed( UBaseType_t uxIndexToWaitOn, BaseType_t xClearCountOnExit, TickType_t xTicksToWait );
++ *
++ * uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait );
++ * @endcode
++ *
++ * Waits for a direct to task notification on a particular index in the calling
++ * task's notification array in a manner similar to taking a counting semaphore.
++ *
++ * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
++ *
++ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this
++ * function to be available.
++ *
++ * Each task has a private array of "notification values" (or 'notifications'),
++ * each of which is a 32-bit unsigned integer (uint32_t).  The constant
++ * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the
++ * array, and (for backward compatibility) defaults to 1 if left undefined.
++ * Prior to FreeRTOS V10.4.0 there was only one notification value per task.
++ *
++ * Events can be sent to a task using an intermediary object.  Examples of such
++ * objects are queues, semaphores, mutexes and event groups.  Task notifications
++ * are a method of sending an event directly to a task without the need for such
++ * an intermediary object.
++ *
++ * A notification sent to a task can optionally perform an action, such as
++ * update, overwrite or increment one of the task's notification values.  In
++ * that way task notifications can be used to send data to a task, or be used as
++ * light weight and fast binary or counting semaphores.
++ *
++ * ulTaskNotifyTakeIndexed() is intended for use when a task notification is
++ * used as a faster and lighter weight binary or counting semaphore alternative.
++ * Actual FreeRTOS semaphores are taken using the xSemaphoreTake() API function,
++ * the equivalent action that instead uses a task notification is
++ * ulTaskNotifyTakeIndexed().
++ *
++ * When a task is using its notification value as a binary or counting semaphore
++ * other tasks should send notifications to it using the xTaskNotifyGiveIndexed()
++ * macro, or the xTaskNotifyIndexed() function with the eAction parameter set to
++ * eIncrement.
++ *
++ * ulTaskNotifyTakeIndexed() can either clear the task's notification value at
++ * the array index specified by the uxIndexToWaitOn parameter to zero on exit,
++ * in which case the notification value acts like a binary semaphore, or
++ * decrement the notification value on exit, in which case the notification
++ * value acts like a counting semaphore.
++ *
++ * A task can use ulTaskNotifyTakeIndexed() to [optionally] block to wait for
++ * a notification.  The task does not consume any CPU time while it is in the
++ * Blocked state.
++ *
++ * Whereas xTaskNotifyWaitIndexed() will return when a notification is pending,
++ * ulTaskNotifyTakeIndexed() will return when the task's notification value is
++ * not zero.
++ *
++ * **NOTE** Each notification within the array operates independently - a task
++ * can only block on one notification within the array at a time and will not be
++ * unblocked by a notification sent to any other array index.
++ *
++ * Backward compatibility information:
++ * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and
++ * all task notification API functions operated on that value. Replacing the
++ * single notification value with an array of notification values necessitated a
++ * new set of API functions that could address specific notifications within the
++ * array.  ulTaskNotifyTake() is the original API function, and remains backward
++ * compatible by always operating on the notification value at index 0 in the
++ * array. Calling ulTaskNotifyTake() is equivalent to calling
++ * ulTaskNotifyTakeIndexed() with the uxIndexToWaitOn parameter set to 0.
++ *
++ * @param uxIndexToWaitOn The index within the calling task's array of
++ * notification values on which the calling task will wait for a notification to
++ * be non-zero.  uxIndexToWaitOn must be less than
++ * configTASK_NOTIFICATION_ARRAY_ENTRIES.  ulTaskNotifyTake() does
++ * not have this parameter and always waits for notifications on index 0.
++ *
++ * @param xClearCountOnExit if xClearCountOnExit is pdFALSE then the task's
++ * notification value is decremented when the function exits.  In this way the
++ * notification value acts like a counting semaphore.  If xClearCountOnExit is
++ * not pdFALSE then the task's notification value is cleared to zero when the
++ * function exits.  In this way the notification value acts like a binary
++ * semaphore.
++ *
++ * @param xTicksToWait The maximum amount of time that the task should wait in
++ * the Blocked state for the task's notification value to be greater than zero,
++ * should the count not already be greater than zero when
++ * ulTaskNotifyTake() was called.  The task will not consume any processing
++ * time while it is in the Blocked state.  This is specified in kernel ticks,
++ * the macro pdMS_TO_TICKS( value_in_ms ) can be used to convert a time
++ * specified in milliseconds to a time specified in ticks.
++ *
++ * @return The task's notification count before it is either cleared to zero or
++ * decremented (see the xClearCountOnExit parameter).
++ *
++ * \defgroup ulTaskNotifyTakeIndexed ulTaskNotifyTakeIndexed
++ * \ingroup TaskNotifications
++ */
++uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
++                                  BaseType_t xClearCountOnExit,
++                                  TickType_t xTicksToWait );
++#define ulTaskNotifyTake( xClearCountOnExit, xTicksToWait ) \
++    ulTaskGenericNotifyTake( ( tskDEFAULT_INDEX_TO_NOTIFY ), ( xClearCountOnExit ), ( xTicksToWait ) )
++#define ulTaskNotifyTakeIndexed( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait ) \
++    ulTaskGenericNotifyTake( ( uxIndexToWaitOn ), ( xClearCountOnExit ), ( xTicksToWait ) )
++
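++/* Example usage (editor's illustrative sketch, not part of the upstream
++ * FreeRTOS sources):
++ *
++ *    // Wait up to 100ms for a "give" on index 0, acting like a binary
++ *    // semaphore because the value is cleared to zero on exit.
++ *    if( ulTaskNotifyTake( pdTRUE, pdMS_TO_TICKS( 100 ) ) != 0 )
++ *    {
++ *        // At least one notification was received - process the event.
++ *    }
++ */
++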
++/**
++ * task.h
++ * @code{c}
++ * BaseType_t xTaskNotifyStateClearIndexed( TaskHandle_t xTask, UBaseType_t uxIndexToClear );
++ *
++ * BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask );
++ * @endcode
++ *
++ * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
++ *
++ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for these
++ * functions to be available.
++ *
++ * Each task has a private array of "notification values" (or 'notifications'),
++ * each of which is a 32-bit unsigned integer (uint32_t).  The constant
++ * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the
++ * array, and (for backward compatibility) defaults to 1 if left undefined.
++ * Prior to FreeRTOS V10.4.0 there was only one notification value per task.
++ *
++ * If a notification is sent to an index within the array of notifications then
++ * the notification at that index is said to be 'pending' until it is read or
++ * explicitly cleared by the receiving task.  xTaskNotifyStateClearIndexed()
++ * is the function that clears a pending notification without reading the
++ * notification value.  The notification value at the same array index is not
++ * altered.  Set xTask to NULL to clear the notification state of the calling
++ * task.
++ *
++ * Backward compatibility information:
++ * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and
++ * all task notification API functions operated on that value. Replacing the
++ * single notification value with an array of notification values necessitated a
++ * new set of API functions that could address specific notifications within the
++ * array.  xTaskNotifyStateClear() is the original API function, and remains
++ * backward compatible by always operating on the notification value at index 0
++ * within the array. Calling xTaskNotifyStateClear() is equivalent to calling
++ * xTaskNotifyStateClearIndexed() with the uxIndexToClear parameter set to 0.
++ *
++ * @param xTask The handle of the RTOS task that will have a notification state
++ * cleared.  Set xTask to NULL to clear a notification state in the calling
++ * task.  To obtain a task's handle create the task using xTaskCreate() and
++ * make use of the pxCreatedTask parameter, or create the task using
++ * xTaskCreateStatic() and store the returned value, or use the task's name in
++ * a call to xTaskGetHandle().
++ *
++ * @param uxIndexToClear The index within the target task's array of
++ * notification values to act upon.  For example, setting uxIndexToClear to 1
++ * will clear the state of the notification at index 1 within the array.
++ * uxIndexToClear must be less than configTASK_NOTIFICATION_ARRAY_ENTRIES.
++ * xTaskNotifyStateClear() does not have this parameter and always acts on the
++ * notification at index 0.
++ *
++ * @return pdTRUE if the task's notification state was set to
++ * eNotWaitingNotification, otherwise pdFALSE.
++ *
++ * \defgroup xTaskNotifyStateClearIndexed xTaskNotifyStateClearIndexed
++ * \ingroup TaskNotifications
++ */
++BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
++                                         UBaseType_t uxIndexToClear );
++#define xTaskNotifyStateClear( xTask ) \
++    xTaskGenericNotifyStateClear( ( xTask ), ( tskDEFAULT_INDEX_TO_NOTIFY ) )
++#define xTaskNotifyStateClearIndexed( xTask, uxIndexToClear ) \
++    xTaskGenericNotifyStateClear( ( xTask ), ( uxIndexToClear ) )
++
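++/* Example usage (editor's illustrative sketch, not part of the upstream
++ * FreeRTOS sources):
++ *
++ *    // Discard any notification left pending at index 0 of the calling task
++ *    // without reading or modifying the notification value itself.
++ *    xTaskNotifyStateClear( NULL );
++ */
++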
++/**
++ * task.h
++ * @code{c}
++ * uint32_t ulTaskNotifyValueClearIndexed( TaskHandle_t xTask, UBaseType_t uxIndexToClear, uint32_t ulBitsToClear );
++ *
++ * uint32_t ulTaskNotifyValueClear( TaskHandle_t xTask, uint32_t ulBitsToClear );
++ * @endcode
++ *
++ * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
++ *
++ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for these
++ * functions to be available.
++ *
++ * Each task has a private array of "notification values" (or 'notifications'),
++ * each of which is a 32-bit unsigned integer (uint32_t).  The constant
++ * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the
++ * array, and (for backward compatibility) defaults to 1 if left undefined.
++ * Prior to FreeRTOS V10.4.0 there was only one notification value per task.
++ *
++ * ulTaskNotifyValueClearIndexed() clears the bits specified by the
++ * ulBitsToClear bit mask in the notification value at array index uxIndexToClear
++ * of the task referenced by xTask.
++ *
++ * Backward compatibility information:
++ * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and
++ * all task notification API functions operated on that value. Replacing the
++ * single notification value with an array of notification values necessitated a
++ * new set of API functions that could address specific notifications within the
++ * array.  ulTaskNotifyValueClear() is the original API function, and remains
++ * backward compatible by always operating on the notification value at index 0
++ * within the array. Calling ulTaskNotifyValueClear() is equivalent to calling
++ * ulTaskNotifyValueClearIndexed() with the uxIndexToClear parameter set to 0.
++ *
++ * @param xTask The handle of the RTOS task that will have bits in one of its
++ * notification values cleared. Set xTask to NULL to clear bits in a
++ * notification value of the calling task.  To obtain a task's handle create the
++ * task using xTaskCreate() and make use of the pxCreatedTask parameter, or
++ * create the task using xTaskCreateStatic() and store the returned value, or
++ * use the task's name in a call to xTaskGetHandle().
++ *
++ * @param uxIndexToClear The index within the target task's array of
++ * notification values in which to clear the bits.  uxIndexToClear
++ * must be less than configTASK_NOTIFICATION_ARRAY_ENTRIES.
++ * ulTaskNotifyValueClear() does not have this parameter and always clears bits
++ * in the notification value at index 0.
++ *
++ * @param ulBitsToClear Bit mask of the bits to clear in the notification value of
++ * xTask. Set a bit to 1 to clear the corresponding bits in the task's notification
++ * value. Set ulBitsToClear to 0xffffffff (UINT_MAX on 32-bit architectures) to clear
++ * the notification value to 0.  Set ulBitsToClear to 0 to query the task's
++ * notification value without clearing any bits.
++ *
++ *
++ * @return The value of the target task's notification value before the bits
++ * specified by ulBitsToClear were cleared.
++ * \defgroup ulTaskNotifyValueClear ulTaskNotifyValueClear
++ * \ingroup TaskNotifications
++ */
++uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
++                                        UBaseType_t uxIndexToClear,
++                                        uint32_t ulBitsToClear );
++#define ulTaskNotifyValueClear( xTask, ulBitsToClear ) \
++    ulTaskGenericNotifyValueClear( ( xTask ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( ulBitsToClear ) )
++#define ulTaskNotifyValueClearIndexed( xTask, uxIndexToClear, ulBitsToClear ) \
++    ulTaskGenericNotifyValueClear( ( xTask ), ( uxIndexToClear ), ( ulBitsToClear ) )
++
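++/* Example usage (editor's illustrative sketch, not part of the upstream
++ * FreeRTOS sources):
++ *
++ *    // Read the calling task's index 0 notification value without clearing
++ *    // any bits (ulBitsToClear is 0), then clear bit 0 only.
++ *    uint32_t ulValue = ulTaskNotifyValueClear( NULL, 0x00 );
++ *    ulTaskNotifyValueClear( NULL, 0x01 );
++ */
++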
++/**
++ * task.h
++ * @code{c}
++ * void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut );
++ * @endcode
++ *
++ * Capture the current time for future use with xTaskCheckForTimeOut().
++ *
++ * @param pxTimeOut Pointer to a timeout object into which the current time
++ * is to be captured.  The captured time includes the tick count and the number
++ * of times the tick count has overflowed since the system first booted.
++ * \defgroup vTaskSetTimeOutState vTaskSetTimeOutState
++ * \ingroup TaskCtrl
++ */
++void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut );
++
++/**
++ * task.h
++ * @code{c}
++ * BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait );
++ * @endcode
++ *
++ * Determines if pxTicksToWait ticks have passed since a time was captured
++ * using a call to vTaskSetTimeOutState().  The captured time includes the tick
++ * count and the number of times the tick count has overflowed.
++ *
++ * @param pxTimeOut The time status as captured previously using
++ * vTaskSetTimeOutState. If the timeout has not yet occurred, it is updated
++ * to reflect the current time status.
++ * @param pxTicksToWait The number of ticks to check for timeout i.e. if
++ * pxTicksToWait ticks have passed since pxTimeOut was last updated (either by
++ * vTaskSetTimeOutState() or xTaskCheckForTimeOut()), the timeout has occurred.
++ * If the timeout has not occurred, pxTicksToWait is updated to reflect the
++ * number of remaining ticks.
++ *
++ * @return If timeout has occurred, pdTRUE is returned. Otherwise pdFALSE is
++ * returned and pxTicksToWait is updated to reflect the number of remaining
++ * ticks.
++ *
++ * @see https://www.FreeRTOS.org/xTaskCheckForTimeOut.html
++ *
++ * Example Usage:
++ * @code{c}
++ *  // Driver library function used to receive uxWantedBytes from an Rx buffer
++ *  // that is filled by a UART interrupt. If there are not enough bytes in the
++ *  // Rx buffer then the task enters the Blocked state until it is notified that
++ *  // more data has been placed into the buffer. If there is still not enough
++ *  // data then the task re-enters the Blocked state, and xTaskCheckForTimeOut()
++ *  // is used to re-calculate the Block time to ensure the total amount of time
++ *  // spent in the Blocked state does not exceed MAX_TIME_TO_WAIT. This
++ *  // continues until either the buffer contains at least uxWantedBytes bytes,
++ *  // or the total amount of time spent in the Blocked state reaches
++ *  // MAX_TIME_TO_WAIT - at which point the task reads however many bytes are
++ *  // available up to a maximum of uxWantedBytes.
++ *
++ *  size_t xUART_Receive( uint8_t *pucBuffer, size_t uxWantedBytes )
++ *  {
++ *  size_t uxReceived = 0;
++ *  TickType_t xTicksToWait = MAX_TIME_TO_WAIT;
++ *  TimeOut_t xTimeOut;
++ *
++ *      // Initialize xTimeOut.  This records the time at which this function
++ *      // was entered.
++ *      vTaskSetTimeOutState( &xTimeOut );
++ *
++ *      // Loop until the buffer contains the wanted number of bytes, or a
++ *      // timeout occurs.
++ *      while( UART_bytes_in_rx_buffer( pxUARTInstance ) < uxWantedBytes )
++ *      {
++ *          // The buffer didn't contain enough data so this task is going to
++ *          // enter the Blocked state. Adjusting xTicksToWait to account for
++ *          // any time that has been spent in the Blocked state within this
++ *          // function so far to ensure the total amount of time spent in the
++ *          // Blocked state does not exceed MAX_TIME_TO_WAIT.
++ *          if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE )
++ *          {
++ *              // Timed out before the wanted number of bytes were available,
++ *              // exit the loop.
++ *              break;
++ *          }
++ *
++ *          // Wait for a maximum of xTicksToWait ticks to be notified that the
++ *          // receive interrupt has placed more data into the buffer.
++ *          ulTaskNotifyTake( pdTRUE, xTicksToWait );
++ *      }
++ *
++ *      // Attempt to read uxWantedBytes from the receive buffer into pucBuffer.
++ *      // The actual number of bytes read (which might be less than
++ *      // uxWantedBytes) is returned.
++ *      uxReceived = UART_read_from_receive_buffer( pxUARTInstance,
++ *                                                  pucBuffer,
++ *                                                  uxWantedBytes );
++ *
++ *      return uxReceived;
++ *  }
++ * @endcode
++ * \defgroup xTaskCheckForTimeOut xTaskCheckForTimeOut
++ * \ingroup TaskCtrl
++ */
++BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
++                                 TickType_t * const pxTicksToWait );
++
++/*-----------------------------------------------------------
++* SCHEDULER INTERNALS AVAILABLE FOR PORTING PURPOSES
++*----------------------------------------------------------*/
++
++/*
++ * Return the handle of the calling task.
++ */
++TaskHandle_t xTaskGetCurrentTaskHandle( void );
++
++/*
++ * Returns the scheduler state as taskSCHEDULER_RUNNING,
++ * taskSCHEDULER_NOT_STARTED or taskSCHEDULER_SUSPENDED.
++ */
++BaseType_t xTaskGetSchedulerState( void );
++
++/* ESP32 */
++BaseType_t xTaskGetAffinity( TaskHandle_t xTask );
++TaskHandle_t xTaskGetCurrentTaskHandleForCPU( BaseType_t cpuid );
++TaskHandle_t xTaskGetIdleTaskHandleForCPU( UBaseType_t cpuid );
++/* Unimplemented */
++#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
++void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
++                                            BaseType_t xIndex,
++                                            void * pvValue );
++void * pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
++                                               BaseType_t xIndex );
++#if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
++typedef void (*TlsDeleteCallbackFunction_t)( int, void * );
++void vTaskSetThreadLocalStoragePointerAndDelCallback( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue, TlsDeleteCallbackFunction_t pvDelCallback);
++#endif
++#endif
++
++/* *INDENT-OFF* */
++#ifdef __cplusplus
++    }
++#endif
++/* *INDENT-ON* */
++#endif /* INC_TASK_H */
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/timers.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/timers.h
+new file mode 100644
+index 0000000000..c5d1acf4d4
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/include/freertos/timers.h
+@@ -0,0 +1,1185 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++
++#ifndef TIMERS_H
++#define TIMERS_H
++
++#ifndef INC_FREERTOS_H
++    #error "include FreeRTOS.h must appear in source files before include timers.h"
++#endif
++
++#include "task.h"
++
++/* *INDENT-OFF* */
++#ifdef __cplusplus
++    extern "C" {
++#endif
++/* *INDENT-ON* */
++
++/*-----------------------------------------------------------
++* MACROS AND DEFINITIONS
++*----------------------------------------------------------*/
++
++/* IDs for commands that can be sent/received on the timer queue.  These are to
++ * be used solely through the macros that make up the public software timer API,
++ * as defined below.  The commands that are sent from interrupts must use the
++ * highest numbers as tmrFIRST_FROM_ISR_COMMAND is used to determine if the task
++ * or interrupt version of the queue send function should be used. */
++#define tmrCOMMAND_EXECUTE_CALLBACK_FROM_ISR    ( ( BaseType_t ) -2 )
++#define tmrCOMMAND_EXECUTE_CALLBACK             ( ( BaseType_t ) -1 )
++#define tmrCOMMAND_START_DONT_TRACE             ( ( BaseType_t ) 0 )
++#define tmrCOMMAND_START                        ( ( BaseType_t ) 1 )
++#define tmrCOMMAND_RESET                        ( ( BaseType_t ) 2 )
++#define tmrCOMMAND_STOP                         ( ( BaseType_t ) 3 )
++#define tmrCOMMAND_CHANGE_PERIOD                ( ( BaseType_t ) 4 )
++#define tmrCOMMAND_DELETE                       ( ( BaseType_t ) 5 )
++
++#define tmrFIRST_FROM_ISR_COMMAND               ( ( BaseType_t ) 6 )
++#define tmrCOMMAND_START_FROM_ISR               ( ( BaseType_t ) 6 )
++#define tmrCOMMAND_RESET_FROM_ISR               ( ( BaseType_t ) 7 )
++#define tmrCOMMAND_STOP_FROM_ISR                ( ( BaseType_t ) 8 )
++#define tmrCOMMAND_CHANGE_PERIOD_FROM_ISR       ( ( BaseType_t ) 9 )
++
++/**
++ * Type by which software timers are referenced.  For example, a call to
++ * xTimerCreate() returns a TimerHandle_t variable that can then be used to
++ * reference the subject timer in calls to other software timer API functions
++ * (for example, xTimerStart(), xTimerReset(), etc.).
++ */
++struct tmrTimerControl; /* The old naming convention is used to prevent breaking kernel aware debuggers. */
++typedef struct tmrTimerControl * TimerHandle_t;
++
++/*
++ * Defines the prototype to which timer callback functions must conform.
++ */
++typedef void (* TimerCallbackFunction_t)( TimerHandle_t xTimer );
++
++/**
++ * TimerHandle_t xTimerCreate(  const char * const pcTimerName,
++ *                              TickType_t xTimerPeriodInTicks,
++ *                              UBaseType_t uxAutoReload,
++ *                              void * pvTimerID,
++ *                              TimerCallbackFunction_t pxCallbackFunction );
++ *
++ * Creates a new software timer instance, and returns a handle by which the
++ * created software timer can be referenced.
++ *
++ * Internally, within the FreeRTOS implementation, software timers use a block
++ * of memory, in which the timer data structure is stored.  If a software timer
++ * is created using xTimerCreate() then the required memory is automatically
++ * dynamically allocated inside the xTimerCreate() function.  (see
++ * https://www.FreeRTOS.org/a00111.html).  If a software timer is created using
++ * xTimerCreateStatic() then the application writer must provide the memory that
++ * will get used by the software timer.  xTimerCreateStatic() therefore allows a
++ * software timer to be created without using any dynamic memory allocation.
++ *
++ * Timers are created in the dormant state.  The xTimerStart(), xTimerReset(),
++ * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and
++ * xTimerChangePeriodFromISR() API functions can all be used to transition a
++ * timer into the active state.
++ *
++ * @param pcTimerName A text name that is assigned to the timer.  This is done
++ * purely to assist debugging.  The kernel itself only ever references a timer
++ * by its handle, and never by its name.
++ *
++ * @param xTimerPeriodInTicks The timer period.  The time is defined in tick
++ * periods so the constant portTICK_PERIOD_MS can be used to convert a time that
++ * has been specified in milliseconds.  For example, if the timer must expire
++ * after 100 ticks, then xTimerPeriodInTicks should be set to 100.
++ * Alternatively, if the timer must expire after 500ms, then xTimerPeriodInTicks can be set
++ * to ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than or
++ * equal to 1000.  The timer period must be greater than 0.
++ *
++ * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will
++ * expire repeatedly with a frequency set by the xTimerPeriodInTicks parameter.
++ * If uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and
++ * enter the dormant state after it expires.
++ *
++ * @param pvTimerID An identifier that is assigned to the timer being created.
++ * Typically this would be used in the timer callback function to identify which
++ * timer expired when the same callback function is assigned to more than one
++ * timer.
++ *
++ * @param pxCallbackFunction The function to call when the timer expires.
++ * Callback functions must have the prototype defined by TimerCallbackFunction_t,
++ * which is "void vCallbackFunction( TimerHandle_t xTimer );".
++ *
++ * @return If the timer is successfully created then a handle to the newly
++ * created timer is returned.  If the timer cannot be created because there is
++ * insufficient FreeRTOS heap remaining to allocate the timer
++ * structures then NULL is returned.
++ *
++ * Example usage:
++ * @verbatim
++ * #define NUM_TIMERS 5
++ *
++ * // An array to hold handles to the created timers.
++ * TimerHandle_t xTimers[ NUM_TIMERS ];
++ *
++ * // An array to hold a count of the number of times each timer expires.
++ * int32_t lExpireCounters[ NUM_TIMERS ] = { 0 };
++ *
++ * // Define a callback function that will be used by multiple timer instances.
++ * // The callback function does nothing but count the number of times the
++ * // associated timer expires, and stop the timer once the timer has expired
++ * // 10 times.
++ * void vTimerCallback( TimerHandle_t pxTimer )
++ * {
++ * int32_t lArrayIndex;
++ * const int32_t xMaxExpiryCountBeforeStopping = 10;
++ *
++ *     // Optionally do something if the pxTimer parameter is NULL.
++ *     configASSERT( pxTimer );
++ *
++ *     // Which timer expired?
++ *     lArrayIndex = ( int32_t ) pvTimerGetTimerID( pxTimer );
++ *
++ *     // Increment the number of times that pxTimer has expired.
++ *     lExpireCounters[ lArrayIndex ] += 1;
++ *
++ *     // If the timer has expired 10 times then stop it from running.
++ *     if( lExpireCounters[ lArrayIndex ] == xMaxExpiryCountBeforeStopping )
++ *     {
++ *         // Do not use a block time if calling a timer API function from a
++ *         // timer callback function, as doing so could cause a deadlock!
++ *         xTimerStop( pxTimer, 0 );
++ *     }
++ * }
++ *
++ * void main( void )
++ * {
++ * int32_t x;
++ *
++ *     // Create then start some timers.  Starting the timers before the scheduler
++ *     // has been started means the timers will start running as soon as
++ *     // the scheduler starts.
++ *     for( x = 0; x < NUM_TIMERS; x++ )
++ *     {
++ *         xTimers[ x ] = xTimerCreate(    "Timer",       // Just a text name, not used by the kernel.
++ *                                         ( 100 * ( x + 1 ) ), // The timer period in ticks (must be greater than 0).
++ *                                         pdTRUE,        // The timers will auto-reload themselves when they expire.
++ *                                         ( void * ) x,  // Assign each timer a unique id equal to its array index.
++ *                                         vTimerCallback // Each timer calls the same callback when it expires.
++ *                                     );
++ *
++ *         if( xTimers[ x ] == NULL )
++ *         {
++ *             // The timer was not created.
++ *         }
++ *         else
++ *         {
++ *             // Start the timer.  No block time is specified, and even if one was
++ *             // it would be ignored because the scheduler has not yet been
++ *             // started.
++ *             if( xTimerStart( xTimers[ x ], 0 ) != pdPASS )
++ *             {
++ *                 // The timer could not be set into the Active state.
++ *             }
++ *         }
++ *     }
++ *
++ *     // ...
++ *     // Create tasks here.
++ *     // ...
++ *
++ *     // Starting the scheduler will start the timers running as they have already
++ *     // been set into the active state.
++ *     vTaskStartScheduler();
++ *
++ *     // Should not reach here.
++ *     for( ;; );
++ * }
++ * @endverbatim
++ */
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++    TimerHandle_t xTimerCreate( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
++                                const TickType_t xTimerPeriodInTicks,
++                                const UBaseType_t uxAutoReload,
++                                void * const pvTimerID,
++                                TimerCallbackFunction_t pxCallbackFunction );
++#endif
++
++/**
++ * TimerHandle_t xTimerCreateStatic(const char * const pcTimerName,
++ *                                  TickType_t xTimerPeriodInTicks,
++ *                                  UBaseType_t uxAutoReload,
++ *                                  void * pvTimerID,
++ *                                  TimerCallbackFunction_t pxCallbackFunction,
++ *                                  StaticTimer_t *pxTimerBuffer );
++ *
++ * Creates a new software timer instance, and returns a handle by which the
++ * created software timer can be referenced.
++ *
++ * Internally, within the FreeRTOS implementation, software timers use a block
++ * of memory, in which the timer data structure is stored.  If a software timer
++ * is created using xTimerCreate() then the required memory is automatically
++ * dynamically allocated inside the xTimerCreate() function.  (see
++ * https://www.FreeRTOS.org/a00111.html).  If a software timer is created using
++ * xTimerCreateStatic() then the application writer must provide the memory that
++ * will get used by the software timer.  xTimerCreateStatic() therefore allows a
++ * software timer to be created without using any dynamic memory allocation.
++ *
++ * Timers are created in the dormant state.  The xTimerStart(), xTimerReset(),
++ * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and
++ * xTimerChangePeriodFromISR() API functions can all be used to transition a
++ * timer into the active state.
++ *
++ * @param pcTimerName A text name that is assigned to the timer.  This is done
++ * purely to assist debugging.  The kernel itself only ever references a timer
++ * by its handle, and never by its name.
++ *
++ * @param xTimerPeriodInTicks The timer period.  The time is defined in tick
++ * periods so the constant portTICK_PERIOD_MS can be used to convert a time that
++ * has been specified in milliseconds.  For example, if the timer must expire
++ * after 100 ticks, then xTimerPeriodInTicks should be set to 100.
++ * Alternatively, if the timer must expire after 500ms, then xTimerPeriodInTicks can be set
++ * to ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than or
++ * equal to 1000.  The timer period must be greater than 0.
++ *
++ * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will
++ * expire repeatedly with a frequency set by the xTimerPeriodInTicks parameter.
++ * If uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and
++ * enter the dormant state after it expires.
++ *
++ * @param pvTimerID An identifier that is assigned to the timer being created.
++ * Typically this would be used in the timer callback function to identify which
++ * timer expired when the same callback function is assigned to more than one
++ * timer.
++ *
++ * @param pxCallbackFunction The function to call when the timer expires.
++ * Callback functions must have the prototype defined by TimerCallbackFunction_t,
++ * which is "void vCallbackFunction( TimerHandle_t xTimer );".
++ *
++ * @param pxTimerBuffer Must point to a variable of type StaticTimer_t, which
++ * will then be used to hold the software timer's data structures, removing
++ * the need for the memory to be allocated dynamically.
++ *
++ * @return If the timer is created then a handle to the created timer is
++ * returned.  If pxTimerBuffer was NULL then NULL is returned.
++ *
++ * Example usage:
++ * @verbatim
++ *
++ * // The buffer used to hold the software timer's data structure.
++ * static StaticTimer_t xTimerBuffer;
++ *
++ * // A variable that will be incremented by the software timer's callback
++ * // function.
++ * UBaseType_t uxVariableToIncrement = 0;
++ *
++ * // A software timer callback function that increments a variable passed to
++ * // it when the software timer was created.  After the 5th increment the
++ * // callback function stops the software timer.
++ * static void prvTimerCallback( TimerHandle_t xExpiredTimer )
++ * {
++ * UBaseType_t *puxVariableToIncrement;
++ * BaseType_t xReturned;
++ *
++ *     // Obtain the address of the variable to increment from the timer ID.
++ *     puxVariableToIncrement = ( UBaseType_t * ) pvTimerGetTimerID( xExpiredTimer );
++ *
++ *     // Increment the variable to show the timer callback has executed.
++ *     ( *puxVariableToIncrement )++;
++ *
++ *     // If this callback has executed the required number of times, stop the
++ *     // timer.
++ *     if( *puxVariableToIncrement == 5 )
++ *     {
++ *         // This is called from a timer callback so must not block.
++ *         xTimerStop( xExpiredTimer, staticDONT_BLOCK );
++ *     }
++ * }
++ *
++ *
++ * void main( void )
++ * {
++ * TimerHandle_t xTimer;
++ * BaseType_t xReturned;
++ * const TickType_t xTimerPeriod = ( 500 / portTICK_PERIOD_MS ); // An example period of 500ms.
++ *
++ *     // Create the software timer.  xTimerCreateStatic() takes one more parameter
++ *     // than the normal xTimerCreate() API function.  The extra parameter is a pointer
++ *     // to the StaticTimer_t structure that will hold the software timer
++ *     // structure.  If the parameter is passed as NULL then the structure will be
++ *     // allocated dynamically, just as if xTimerCreate() had been called.
++ *     xTimer = xTimerCreateStatic( "T1",             // Text name for the timer.  Helps debugging only.  Not used by FreeRTOS.
++ *                                  xTimerPeriod,     // The period of the timer in ticks.
++ *                                  pdTRUE,           // This is an auto-reload timer.
++ *                                  ( void * ) &uxVariableToIncrement,    // A variable incremented by the software timer's callback function
++ *                                  prvTimerCallback, // The function to execute when the timer expires.
++ *                                  &xTimerBuffer );  // The buffer that will hold the software timer structure.
++ *
++ *     // The scheduler has not started yet so a block time is not used.
++ *     xReturned = xTimerStart( xTimer, 0 );
++ *
++ *     // ...
++ *     // Create tasks here.
++ *     // ...
++ *
++ *     // Starting the scheduler will start the timers running as they have already
++ *     // been set into the active state.
++ *     vTaskStartScheduler();
++ *
++ *     // Should not reach here.
++ *     for( ;; );
++ * }
++ * @endverbatim
++ */
++#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++    TimerHandle_t xTimerCreateStatic( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
++                                      const TickType_t xTimerPeriodInTicks,
++                                      const UBaseType_t uxAutoReload,
++                                      void * const pvTimerID,
++                                      TimerCallbackFunction_t pxCallbackFunction,
++                                      StaticTimer_t * pxTimerBuffer );
++#endif /* configSUPPORT_STATIC_ALLOCATION */
++
++/**
++ * void *pvTimerGetTimerID( TimerHandle_t xTimer );
++ *
++ * Returns the ID assigned to the timer.
++ *
++ * IDs are assigned to timers using the pvTimerID parameter of the call to
++ * xTimerCreate() that was used to create the timer, and by calling the
++ * vTimerSetTimerID() API function.
++ *
++ * If the same callback function is assigned to multiple timers then the timer
++ * ID can be used as timer specific (timer local) storage.
++ *
++ * @param xTimer The timer being queried.
++ *
++ * @return The ID assigned to the timer being queried.
++ *
++ * Example usage:
++ *
++ * See the xTimerCreate() API function example usage scenario.
++ */
++void * pvTimerGetTimerID( const TimerHandle_t xTimer );
++
++/**
++ * void vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID );
++ *
++ * Sets the ID assigned to the timer.
++ *
++ * IDs are assigned to timers using the pvTimerID parameter of the call to
++ * xTimerCreate() that was used to create the timer.
++ *
++ * If the same callback function is assigned to multiple timers then the timer
++ * ID can be used as timer specific (timer local) storage.
++ *
++ * @param xTimer The timer being updated.
++ *
++ * @param pvNewID The ID to assign to the timer.
++ *
++ * Example usage:
++ *
++ * See the xTimerCreate() API function example usage scenario.
++ */
++void vTimerSetTimerID( TimerHandle_t xTimer,
++                       void * pvNewID );
++
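++/* A minimal illustrative sketch, assuming xTimer was created with
++ * xTimerCreate() as in the example above.  vBumpTimerCount() is a
++ * hypothetical helper showing how the ID can be read with
++ * pvTimerGetTimerID() and updated with vTimerSetTimerID():
++ *
++ * void vBumpTimerCount( TimerHandle_t xTimer )
++ * {
++ *     uint32_t ulCount;
++ *
++ *     // Read the value stored in the timer ID when the timer was created
++ *     // (or last updated).
++ *     ulCount = ( uint32_t ) pvTimerGetTimerID( xTimer );
++ *
++ *     // Store an incremented value back into the timer ID.
++ *     vTimerSetTimerID( xTimer, ( void * ) ( ulCount + 1 ) );
++ * }
++ */
++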
++/**
++ * BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer );
++ *
++ * Queries a timer to see if it is active or dormant.
++ *
++ * A timer will be dormant if:
++ *     1) It has been created but not started, or
++ *     2) It is an expired one-shot timer that has not been restarted.
++ *
++ * Timers are created in the dormant state.  The xTimerStart(), xTimerReset(),
++ * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and
++ * xTimerChangePeriodFromISR() API functions can all be used to transition a timer into the
++ * active state.
++ *
++ * @param xTimer The timer being queried.
++ *
++ * @return pdFALSE will be returned if the timer is dormant.  A value other than
++ * pdFALSE will be returned if the timer is active.
++ *
++ * Example usage:
++ * @verbatim
++ * // This function assumes xTimer has already been created.
++ * void vAFunction( TimerHandle_t xTimer )
++ * {
++ *     if( xTimerIsTimerActive( xTimer ) != pdFALSE ) // or more simply and equivalently "if( xTimerIsTimerActive( xTimer ) )"
++ *     {
++ *         // xTimer is active, do something.
++ *     }
++ *     else
++ *     {
++ *         // xTimer is not active, do something else.
++ *     }
++ * }
++ * @endverbatim
++ */
++BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer );
++
++/**
++ * TaskHandle_t xTimerGetTimerDaemonTaskHandle( void );
++ *
++ * Simply returns the handle of the timer service/daemon task.  It is not valid
++ * to call xTimerGetTimerDaemonTaskHandle() before the scheduler has been started.
++ */
++TaskHandle_t xTimerGetTimerDaemonTaskHandle( void );
++
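++/* A short sketch, assuming the scheduler has already been started.
++ * prvCheckTimerTask() is a hypothetical helper that obtains the daemon task
++ * handle so it can be passed to other task API functions:
++ *
++ * void prvCheckTimerTask( void )
++ * {
++ *     TaskHandle_t xTimerTaskHandle;
++ *
++ *     // Valid only after vTaskStartScheduler() has been called, because the
++ *     // timer service/daemon task is created when the scheduler starts.
++ *     xTimerTaskHandle = xTimerGetTimerDaemonTaskHandle();
++ *     configASSERT( xTimerTaskHandle != NULL );
++ * }
++ */
++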
++/**
++ * BaseType_t xTimerStart( TimerHandle_t xTimer, TickType_t xTicksToWait );
++ *
++ * Timer functionality is provided by a timer service/daemon task.  Many of the
++ * public FreeRTOS timer API functions send commands to the timer service task
++ * through a queue called the timer command queue.  The timer command queue is
++ * private to the kernel itself and is not directly accessible to application
++ * code.  The length of the timer command queue is set by the
++ * configTIMER_QUEUE_LENGTH configuration constant.
++ *
++ * xTimerStart() starts a timer that was previously created using the
++ * xTimerCreate() API function.  If the timer had already been started and was
++ * already in the active state, then xTimerStart() has equivalent functionality
++ * to the xTimerReset() API function.
++ *
++ * Starting a timer ensures the timer is in the active state.  If the timer
++ * is not stopped, deleted, or reset in the mean time, the callback function
++ * associated with the timer will get called 'n' ticks after xTimerStart() was
++ * called, where 'n' is the timer's defined period.
++ *
++ * It is valid to call xTimerStart() before the scheduler has been started, but
++ * when this is done the timer will not actually start until the scheduler is
++ * started, and the timer's expiry time will be relative to when the scheduler is
++ * started, not relative to when xTimerStart() was called.
++ *
++ * The configUSE_TIMERS configuration constant must be set to 1 for xTimerStart()
++ * to be available.
++ *
++ * @param xTimer The handle of the timer being started/restarted.
++ *
++ * @param xTicksToWait Specifies the time, in ticks, that the calling task should
++ * be held in the Blocked state to wait for the start command to be successfully
++ * sent to the timer command queue, should the queue already be full when
++ * xTimerStart() was called.  xTicksToWait is ignored if xTimerStart() is called
++ * before the scheduler is started.
++ *
++ * @return pdFAIL will be returned if the start command could not be sent to
++ * the timer command queue even after xTicksToWait ticks had passed.  pdPASS will
++ * be returned if the command was successfully sent to the timer command queue.
++ * When the command is actually processed will depend on the priority of the
++ * timer service/daemon task relative to other tasks in the system, although the
++ * timer's expiry time is relative to when xTimerStart() is actually called.  The
++ * timer service/daemon task priority is set by the configTIMER_TASK_PRIORITY
++ * configuration constant.
++ *
++ * Example usage:
++ *
++ * See the xTimerCreate() API function example usage scenario.
++ *
++ */
++#define xTimerStart( xTimer, xTicksToWait ) \
++    xTimerGenericCommand( ( xTimer ), tmrCOMMAND_START, ( xTaskGetTickCount() ), NULL, ( xTicksToWait ) )
++
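++/* A minimal sketch, assuming xTimer has already been created and the
++ * scheduler is running, so a non-zero block time is meaningful.
++ * vStartTimerFromTask() is a hypothetical helper whose calling task blocks
++ * for up to 10ms if the timer command queue is full:
++ *
++ * void vStartTimerFromTask( TimerHandle_t xTimer )
++ * {
++ *     if( xTimerStart( xTimer, pdMS_TO_TICKS( 10 ) ) != pdPASS )
++ *     {
++ *         // The start command could not be queued within 10ms.  Take
++ *         // appropriate action here.
++ *     }
++ * }
++ */
++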
++/**
++ * BaseType_t xTimerStop( TimerHandle_t xTimer, TickType_t xTicksToWait );
++ *
++ * Timer functionality is provided by a timer service/daemon task.  Many of the
++ * public FreeRTOS timer API functions send commands to the timer service task
++ * through a queue called the timer command queue.  The timer command queue is
++ * private to the kernel itself and is not directly accessible to application
++ * code.  The length of the timer command queue is set by the
++ * configTIMER_QUEUE_LENGTH configuration constant.
++ *
++ * xTimerStop() stops a timer that was previously started using any of the
++ * xTimerStart(), xTimerReset(), xTimerStartFromISR(), xTimerResetFromISR(),
++ * xTimerChangePeriod() or xTimerChangePeriodFromISR() API functions.
++ *
++ * Stopping a timer ensures the timer is not in the active state.
++ *
++ * The configUSE_TIMERS configuration constant must be set to 1 for xTimerStop()
++ * to be available.
++ *
++ * @param xTimer The handle of the timer being stopped.
++ *
++ * @param xTicksToWait Specifies the time, in ticks, that the calling task should
++ * be held in the Blocked state to wait for the stop command to be successfully
++ * sent to the timer command queue, should the queue already be full when
++ * xTimerStop() was called.  xTicksToWait is ignored if xTimerStop() is called
++ * before the scheduler is started.
++ *
++ * @return pdFAIL will be returned if the stop command could not be sent to
++ * the timer command queue even after xTicksToWait ticks had passed.  pdPASS will
++ * be returned if the command was successfully sent to the timer command queue.
++ * When the command is actually processed will depend on the priority of the
++ * timer service/daemon task relative to other tasks in the system.  The timer
++ * service/daemon task priority is set by the configTIMER_TASK_PRIORITY
++ * configuration constant.
++ *
++ * Example usage:
++ *
++ * See the xTimerCreate() API function example usage scenario.
++ *
++ */
++#define xTimerStop( xTimer, xTicksToWait ) \
++    xTimerGenericCommand( ( xTimer ), tmrCOMMAND_STOP, 0U, NULL, ( xTicksToWait ) )
++
++/**
++ * BaseType_t xTimerChangePeriod(   TimerHandle_t xTimer,
++ *                                  TickType_t xNewPeriod,
++ *                                  TickType_t xTicksToWait );
++ *
++ * Timer functionality is provided by a timer service/daemon task.  Many of the
++ * public FreeRTOS timer API functions send commands to the timer service task
++ * through a queue called the timer command queue.  The timer command queue is
++ * private to the kernel itself and is not directly accessible to application
++ * code.  The length of the timer command queue is set by the
++ * configTIMER_QUEUE_LENGTH configuration constant.
++ *
++ * xTimerChangePeriod() changes the period of a timer that was previously
++ * created using the xTimerCreate() API function.
++ *
++ * xTimerChangePeriod() can be called to change the period of an active or
++ * dormant state timer.
++ *
++ * The configUSE_TIMERS configuration constant must be set to 1 for
++ * xTimerChangePeriod() to be available.
++ *
++ * @param xTimer The handle of the timer that is having its period changed.
++ *
++ * @param xNewPeriod The new period for xTimer. Timer periods are specified in
++ * tick periods, so the constant portTICK_PERIOD_MS can be used to convert a time
++ * that has been specified in milliseconds.  For example, if the timer must
++ * expire after 100 ticks, then xNewPeriod should be set to 100.  Alternatively,
++ * if the timer must expire after 500ms, then xNewPeriod can be set to
++ * ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than
++ * or equal to 1000.
++ *
++ * @param xTicksToWait Specifies the time, in ticks, that the calling task should
++ * be held in the Blocked state to wait for the change period command to be
++ * successfully sent to the timer command queue, should the queue already be
++ * full when xTimerChangePeriod() was called.  xTicksToWait is ignored if
++ * xTimerChangePeriod() is called before the scheduler is started.
++ *
++ * @return pdFAIL will be returned if the change period command could not be
++ * sent to the timer command queue even after xTicksToWait ticks had passed.
++ * pdPASS will be returned if the command was successfully sent to the timer
++ * command queue.  When the command is actually processed will depend on the
++ * priority of the timer service/daemon task relative to other tasks in the
++ * system.  The timer service/daemon task priority is set by the
++ * configTIMER_TASK_PRIORITY configuration constant.
++ *
++ * Example usage:
++ * @verbatim
++ * // This function assumes xTimer has already been created.  If the timer
++ * // referenced by xTimer is already active when it is called, then the timer
++ * // is deleted.  If the timer referenced by xTimer is not active when it is
++ * // called, then the period of the timer is set to 500ms and the timer is
++ * // started.
++ * void vAFunction( TimerHandle_t xTimer )
++ * {
++ *     if( xTimerIsTimerActive( xTimer ) != pdFALSE ) // or more simply and equivalently "if( xTimerIsTimerActive( xTimer ) )"
++ *     {
++ *         // xTimer is already active - delete it.
++ *         xTimerDelete( xTimer, 0 );
++ *     }
++ *     else
++ *     {
++ *         // xTimer is not active, change its period to 500ms.  This will also
++ *         // cause the timer to start.  Block for a maximum of 100 ticks if the
++ *         // change period command cannot immediately be sent to the timer
++ *         // command queue.
++ *         if( xTimerChangePeriod( xTimer, 500 / portTICK_PERIOD_MS, 100 ) == pdPASS )
++ *         {
++ *             // The command was successfully sent.
++ *         }
++ *         else
++ *         {
++ *             // The command could not be sent, even after waiting for 100 ticks
++ *             // to pass.  Take appropriate action here.
++ *         }
++ *     }
++ * }
++ * @endverbatim
++ */
++#define xTimerChangePeriod( xTimer, xNewPeriod, xTicksToWait ) \
++    xTimerGenericCommand( ( xTimer ), tmrCOMMAND_CHANGE_PERIOD, ( xNewPeriod ), NULL, ( xTicksToWait ) )
++
++/**
++ * BaseType_t xTimerDelete( TimerHandle_t xTimer, TickType_t xTicksToWait );
++ *
++ * Timer functionality is provided by a timer service/daemon task.  Many of the
++ * public FreeRTOS timer API functions send commands to the timer service task
++ * through a queue called the timer command queue.  The timer command queue is
++ * private to the kernel itself and is not directly accessible to application
++ * code.  The length of the timer command queue is set by the
++ * configTIMER_QUEUE_LENGTH configuration constant.
++ *
++ * xTimerDelete() deletes a timer that was previously created using the
++ * xTimerCreate() API function.
++ *
++ * The configUSE_TIMERS configuration constant must be set to 1 for
++ * xTimerDelete() to be available.
++ *
++ * @param xTimer The handle of the timer being deleted.
++ *
++ * @param xTicksToWait Specifies the time, in ticks, that the calling task should
++ * be held in the Blocked state to wait for the delete command to be
++ * successfully sent to the timer command queue, should the queue already be
++ * full when xTimerDelete() was called.  xTicksToWait is ignored if xTimerDelete()
++ * is called before the scheduler is started.
++ *
++ * @return pdFAIL will be returned if the delete command could not be sent to
++ * the timer command queue even after xTicksToWait ticks had passed.  pdPASS will
++ * be returned if the command was successfully sent to the timer command queue.
++ * When the command is actually processed will depend on the priority of the
++ * timer service/daemon task relative to other tasks in the system.  The timer
++ * service/daemon task priority is set by the configTIMER_TASK_PRIORITY
++ * configuration constant.
++ *
++ * Example usage:
++ *
++ * See the xTimerChangePeriod() API function example usage scenario.
++ */
++#define xTimerDelete( xTimer, xTicksToWait ) \
++    xTimerGenericCommand( ( xTimer ), tmrCOMMAND_DELETE, 0U, NULL, ( xTicksToWait ) )
++
++/**
++ * BaseType_t xTimerReset( TimerHandle_t xTimer, TickType_t xTicksToWait );
++ *
++ * Timer functionality is provided by a timer service/daemon task.  Many of the
++ * public FreeRTOS timer API functions send commands to the timer service task
++ * through a queue called the timer command queue.  The timer command queue is
++ * private to the kernel itself and is not directly accessible to application
++ * code.  The length of the timer command queue is set by the
++ * configTIMER_QUEUE_LENGTH configuration constant.
++ *
++ * xTimerReset() re-starts a timer that was previously created using the
++ * xTimerCreate() API function.  If the timer had already been started and was
++ * already in the active state, then xTimerReset() will cause the timer to
++ * re-evaluate its expiry time so that it is relative to when xTimerReset() was
++ * called.  If the timer was in the dormant state then xTimerReset() has
++ * equivalent functionality to the xTimerStart() API function.
++ *
++ * Resetting a timer ensures the timer is in the active state.  If the timer
++ * is not stopped, deleted, or reset in the mean time, the callback function
++ * associated with the timer will get called 'n' ticks after xTimerReset() was
++ * called, where 'n' is the timer's defined period.
++ *
++ * It is valid to call xTimerReset() before the scheduler has been started, but
++ * when this is done the timer will not actually start until the scheduler is
++ * started, and the timer's expiry time will be relative to when the scheduler is
++ * started, not relative to when xTimerReset() was called.
++ *
++ * The configUSE_TIMERS configuration constant must be set to 1 for xTimerReset()
++ * to be available.
++ *
++ * @param xTimer The handle of the timer being reset/started/restarted.
++ *
++ * @param xTicksToWait Specifies the time, in ticks, that the calling task should
++ * be held in the Blocked state to wait for the reset command to be successfully
++ * sent to the timer command queue, should the queue already be full when
++ * xTimerReset() was called.  xTicksToWait is ignored if xTimerReset() is called
++ * before the scheduler is started.
++ *
++ * @return pdFAIL will be returned if the reset command could not be sent to
++ * the timer command queue even after xTicksToWait ticks had passed.  pdPASS will
++ * be returned if the command was successfully sent to the timer command queue.
++ * When the command is actually processed will depend on the priority of the
++ * timer service/daemon task relative to other tasks in the system, although the
++ * timer's expiry time is relative to when xTimerReset() is actually called.  The
++ * timer service/daemon task priority is set by the configTIMER_TASK_PRIORITY
++ * configuration constant.
++ *
++ * Example usage:
++ * @verbatim
++ * // When a key is pressed, an LCD back-light is switched on.  If 5 seconds pass
++ * // without a key being pressed, then the LCD back-light is switched off.  In
++ * // this case, the timer is a one-shot timer.
++ *
++ * TimerHandle_t xBacklightTimer = NULL;
++ *
++ * // The callback function assigned to the one-shot timer.  In this case the
++ * // parameter is not used.
++ * void vBacklightTimerCallback( TimerHandle_t pxTimer )
++ * {
++ *     // The timer expired, therefore 5 seconds must have passed since a key
++ *     // was pressed.  Switch off the LCD back-light.
++ *     vSetBacklightState( BACKLIGHT_OFF );
++ * }
++ *
++ * // The key press event handler.
++ * void vKeyPressEventHandler( char cKey )
++ * {
++ *     // Ensure the LCD back-light is on, then reset the timer that is
++ *     // responsible for turning the back-light off after 5 seconds of
++ *     // key inactivity.  Wait 100 ticks for the command to be successfully sent
++ *     // if it cannot be sent immediately.
++ *     vSetBacklightState( BACKLIGHT_ON );
++ *     if( xTimerReset( xBacklightTimer, 100 ) != pdPASS )
++ *     {
++ *         // The reset command was not executed successfully.  Take appropriate
++ *         // action here.
++ *     }
++ *
++ *     // Perform the rest of the key processing here.
++ * }
++ *
++ * void main( void )
++ * {
++ * int32_t x;
++ *
++ *     // Create then start the one-shot timer that is responsible for turning
++ *     // the back-light off if no keys are pressed within a 5 second period.
++ *     xBacklightTimer = xTimerCreate( "BacklightTimer",           // Just a text name, not used by the kernel.
++ *                                     ( 5000 / portTICK_PERIOD_MS), // The timer period in ticks.
++ *                                     pdFALSE,                    // The timer is a one-shot timer.
++ *                                     0,                          // The id is not used by the callback so can take any value.
++ *                                     vBacklightTimerCallback     // The callback function that switches the LCD back-light off.
++ *                                   );
++ *
++ *     if( xBacklightTimer == NULL )
++ *     {
++ *         // The timer was not created.
++ *     }
++ *     else
++ *     {
++ *         // Start the timer.  No block time is specified, and even if one was
++ *         // it would be ignored because the scheduler has not yet been
++ *         // started.
++ *         if( xTimerStart( xBacklightTimer, 0 ) != pdPASS )
++ *         {
++ *             // The timer could not be set into the Active state.
++ *         }
++ *     }
++ *
++ *     // ...
++ *     // Create tasks here.
++ *     // ...
++ *
++ *     // Starting the scheduler will start the timer running as it has already
++ *     // been set into the active state.
++ *     vTaskStartScheduler();
++ *
++ *     // Should not reach here.
++ *     for( ;; );
++ * }
++ * @endverbatim
++ */
++#define xTimerReset( xTimer, xTicksToWait ) \
++    xTimerGenericCommand( ( xTimer ), tmrCOMMAND_RESET, ( xTaskGetTickCount() ), NULL, ( xTicksToWait ) )
++
++/**
++ * BaseType_t xTimerStartFromISR(   TimerHandle_t xTimer,
++ *                                  BaseType_t *pxHigherPriorityTaskWoken );
++ *
++ * A version of xTimerStart() that can be called from an interrupt service
++ * routine.
++ *
++ * @param xTimer The handle of the timer being started/restarted.
++ *
++ * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most
++ * of its time in the Blocked state, waiting for messages to arrive on the timer
++ * command queue.  Calling xTimerStartFromISR() writes a message to the timer
++ * command queue, so has the potential to transition the timer service/daemon
++ * task out of the Blocked state.  If calling xTimerStartFromISR() causes the
++ * timer service/daemon task to leave the Blocked state, and the timer service/
++ * daemon task has a priority equal to or greater than the currently executing
++ * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will
++ * get set to pdTRUE internally within the xTimerStartFromISR() function.  If
++ * xTimerStartFromISR() sets this value to pdTRUE then a context switch should
++ * be performed before the interrupt exits.
++ *
++ * @return pdFAIL will be returned if the start command could not be sent to
++ * the timer command queue.  pdPASS will be returned if the command was
++ * successfully sent to the timer command queue.  When the command is actually
++ * processed will depend on the priority of the timer service/daemon task
++ * relative to other tasks in the system, although the timer's expiry time is
++ * relative to when xTimerStartFromISR() is actually called.  The timer
++ * service/daemon task priority is set by the configTIMER_TASK_PRIORITY
++ * configuration constant.
++ *
++ * Example usage:
++ * @verbatim
++ * // This scenario assumes xBacklightTimer has already been created.  When a
++ * // key is pressed, an LCD back-light is switched on.  If 5 seconds pass
++ * // without a key being pressed, then the LCD back-light is switched off.  In
++ * // this case, the timer is a one-shot timer, and unlike the example given for
++ * // the xTimerReset() function, the key press event handler is an interrupt
++ * // service routine.
++ *
++ * // The callback function assigned to the one-shot timer.  In this case the
++ * // parameter is not used.
++ * void vBacklightTimerCallback( TimerHandle_t pxTimer )
++ * {
++ *     // The timer expired, therefore 5 seconds must have passed since a key
++ *     // was pressed.  Switch off the LCD back-light.
++ *     vSetBacklightState( BACKLIGHT_OFF );
++ * }
++ *
++ * // The key press interrupt service routine.
++ * void vKeyPressEventInterruptHandler( void )
++ * {
++ * BaseType_t xHigherPriorityTaskWoken = pdFALSE;
++ *
++ *     // Ensure the LCD back-light is on, then restart the timer that is
++ *     // responsible for turning the back-light off after 5 seconds of
++ *     // key inactivity.  This is an interrupt service routine so can only
++ *     // call FreeRTOS API functions that end in "FromISR".
++ *     vSetBacklightState( BACKLIGHT_ON );
++ *
++ *     // xTimerStartFromISR() or xTimerResetFromISR() could be called here
++ *     // as both cause the timer to re-calculate its expiry time.
++ *     // xHigherPriorityTaskWoken was initialised to pdFALSE when it was
++ *     // declared (in this function).
++ *     if( xTimerStartFromISR( xBacklightTimer, &xHigherPriorityTaskWoken ) != pdPASS )
++ *     {
++ *         // The start command was not executed successfully.  Take appropriate
++ *         // action here.
++ *     }
++ *
++ *     // Perform the rest of the key processing here.
++ *
++ *     // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch
++ *     // should be performed.  The syntax required to perform a context switch
++ *     // from inside an ISR varies from port to port, and from compiler to
++ *     // compiler.  Inspect the demos for the port you are using to find the
++ *     // actual syntax required.
++ *     if( xHigherPriorityTaskWoken != pdFALSE )
++ *     {
++ *         // Call the interrupt safe yield function here (actual function
++ *         // depends on the FreeRTOS port being used).
++ *     }
++ * }
++ * @endverbatim
++ */
++#define xTimerStartFromISR( xTimer, pxHigherPriorityTaskWoken ) \
++    xTimerGenericCommand( ( xTimer ), tmrCOMMAND_START_FROM_ISR, ( xTaskGetTickCountFromISR() ), ( pxHigherPriorityTaskWoken ), 0U )
++
++/**
++ * BaseType_t xTimerStopFromISR(    TimerHandle_t xTimer,
++ *                                  BaseType_t *pxHigherPriorityTaskWoken );
++ *
++ * A version of xTimerStop() that can be called from an interrupt service
++ * routine.
++ *
++ * @param xTimer The handle of the timer being stopped.
++ *
++ * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most
++ * of its time in the Blocked state, waiting for messages to arrive on the timer
++ * command queue.  Calling xTimerStopFromISR() writes a message to the timer
++ * command queue, so has the potential to transition the timer service/daemon
++ * task out of the Blocked state.  If calling xTimerStopFromISR() causes the
++ * timer service/daemon task to leave the Blocked state, and the timer service/
++ * daemon task has a priority equal to or greater than the currently executing
++ * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will
++ * get set to pdTRUE internally within the xTimerStopFromISR() function.  If
++ * xTimerStopFromISR() sets this value to pdTRUE then a context switch should
++ * be performed before the interrupt exits.
++ *
++ * @return pdFAIL will be returned if the stop command could not be sent to
++ * the timer command queue.  pdPASS will be returned if the command was
++ * successfully sent to the timer command queue.  When the command is actually
++ * processed will depend on the priority of the timer service/daemon task
++ * relative to other tasks in the system.  The timer service/daemon task
++ * priority is set by the configTIMER_TASK_PRIORITY configuration constant.
++ *
++ * Example usage:
++ * @verbatim
++ * // This scenario assumes xTimer has already been created and started.  When
++ * // an interrupt occurs, the timer should be simply stopped.
++ *
++ * // The interrupt service routine that stops the timer.
++ * void vAnExampleInterruptServiceRoutine( void )
++ * {
++ * BaseType_t xHigherPriorityTaskWoken = pdFALSE;
++ *
++ *     // The interrupt has occurred - simply stop the timer.
++ *     // xHigherPriorityTaskWoken was set to pdFALSE where it was defined
++ *     // (within this function).  As this is an interrupt service routine, only
++ *     // FreeRTOS API functions that end in "FromISR" can be used.
++ *     if( xTimerStopFromISR( xTimer, &xHigherPriorityTaskWoken ) != pdPASS )
++ *     {
++ *         // The stop command was not executed successfully.  Take appropriate
++ *         // action here.
++ *     }
++ *
++ *     // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch
++ *     // should be performed.  The syntax required to perform a context switch
++ *     // from inside an ISR varies from port to port, and from compiler to
++ *     // compiler.  Inspect the demos for the port you are using to find the
++ *     // actual syntax required.
++ *     if( xHigherPriorityTaskWoken != pdFALSE )
++ *     {
++ *         // Call the interrupt safe yield function here (actual function
++ *         // depends on the FreeRTOS port being used).
++ *     }
++ * }
++ * @endverbatim
++ */
++#define xTimerStopFromISR( xTimer, pxHigherPriorityTaskWoken ) \
++    xTimerGenericCommand( ( xTimer ), tmrCOMMAND_STOP_FROM_ISR, 0, ( pxHigherPriorityTaskWoken ), 0U )
++
++/**
++ * BaseType_t xTimerChangePeriodFromISR( TimerHandle_t xTimer,
++ *                                       TickType_t xNewPeriod,
++ *                                       BaseType_t *pxHigherPriorityTaskWoken );
++ *
++ * A version of xTimerChangePeriod() that can be called from an interrupt
++ * service routine.
++ *
++ * @param xTimer The handle of the timer that is having its period changed.
++ *
++ * @param xNewPeriod The new period for xTimer. Timer periods are specified in
++ * tick periods, so the constant portTICK_PERIOD_MS can be used to convert a time
++ * that has been specified in milliseconds.  For example, if the timer must
++ * expire after 100 ticks, then xNewPeriod should be set to 100.  Alternatively,
++ * if the timer must expire after 500ms, then xNewPeriod can be set to
++ * ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than
++ * or equal to 1000.
++ *
++ * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most
++ * of its time in the Blocked state, waiting for messages to arrive on the timer
++ * command queue.  Calling xTimerChangePeriodFromISR() writes a message to the
++ * timer command queue, so has the potential to transition the timer service/
++ * daemon task out of the Blocked state.  If calling xTimerChangePeriodFromISR()
++ * causes the timer service/daemon task to leave the Blocked state, and the
++ * timer service/daemon task has a priority equal to or greater than the
++ * currently executing task (the task that was interrupted), then
++ * *pxHigherPriorityTaskWoken will get set to pdTRUE internally within the
++ * xTimerChangePeriodFromISR() function.  If xTimerChangePeriodFromISR() sets
++ * this value to pdTRUE then a context switch should be performed before the
++ * interrupt exits.
++ *
++ * @return pdFAIL will be returned if the command to change the timer's period
++ * could not be sent to the timer command queue.  pdPASS will be returned if the
++ * command was successfully sent to the timer command queue.  When the command
++ * is actually processed will depend on the priority of the timer service/daemon
++ * task relative to other tasks in the system.  The timer service/daemon task
++ * priority is set by the configTIMER_TASK_PRIORITY configuration constant.
++ *
++ * Example usage:
++ * @verbatim
++ * // This scenario assumes xTimer has already been created and started.  When
++ * // an interrupt occurs, the period of xTimer should be changed to 500ms.
++ *
++ * // The interrupt service routine that changes the period of xTimer.
++ * void vAnExampleInterruptServiceRoutine( void )
++ * {
++ * BaseType_t xHigherPriorityTaskWoken = pdFALSE;
++ *
++ *     // The interrupt has occurred - change the period of xTimer to 500ms.
++ *     // xHigherPriorityTaskWoken was set to pdFALSE where it was defined
++ *     // (within this function).  As this is an interrupt service routine, only
++ *     // FreeRTOS API functions that end in "FromISR" can be used.
++ *     if( xTimerChangePeriodFromISR( xTimer, ( 500 / portTICK_PERIOD_MS ), &xHigherPriorityTaskWoken ) != pdPASS )
++ *     {
++ *         // The command to change the timer's period was not executed
++ *         // successfully.  Take appropriate action here.
++ *     }
++ *
++ *     // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch
++ *     // should be performed.  The syntax required to perform a context switch
++ *     // from inside an ISR varies from port to port, and from compiler to
++ *     // compiler.  Inspect the demos for the port you are using to find the
++ *     // actual syntax required.
++ *     if( xHigherPriorityTaskWoken != pdFALSE )
++ *     {
++ *         // Call the interrupt safe yield function here (actual function
++ *         // depends on the FreeRTOS port being used).
++ *     }
++ * }
++ * @endverbatim
++ */
++#define xTimerChangePeriodFromISR( xTimer, xNewPeriod, pxHigherPriorityTaskWoken ) \
++    xTimerGenericCommand( ( xTimer ), tmrCOMMAND_CHANGE_PERIOD_FROM_ISR, ( xNewPeriod ), ( pxHigherPriorityTaskWoken ), 0U )
++
++/**
++ * BaseType_t xTimerResetFromISR(   TimerHandle_t xTimer,
++ *                                  BaseType_t *pxHigherPriorityTaskWoken );
++ *
++ * A version of xTimerReset() that can be called from an interrupt service
++ * routine.
++ *
++ * @param xTimer The handle of the timer that is to be started, reset, or
++ * restarted.
++ *
++ * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most
++ * of its time in the Blocked state, waiting for messages to arrive on the timer
++ * command queue.  Calling xTimerResetFromISR() writes a message to the timer
++ * command queue, so has the potential to transition the timer service/daemon
++ * task out of the Blocked state.  If calling xTimerResetFromISR() causes the
++ * timer service/daemon task to leave the Blocked state, and the timer service/
++ * daemon task has a priority equal to or greater than the currently executing
++ * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will
++ * get set to pdTRUE internally within the xTimerResetFromISR() function.  If
++ * xTimerResetFromISR() sets this value to pdTRUE then a context switch should
++ * be performed before the interrupt exits.
++ *
++ * @return pdFAIL will be returned if the reset command could not be sent to
++ * the timer command queue.  pdPASS will be returned if the command was
++ * successfully sent to the timer command queue.  When the command is actually
++ * processed will depend on the priority of the timer service/daemon task
++ * relative to other tasks in the system, although the timer's expiry time is
++ * relative to when xTimerResetFromISR() is actually called.  The timer service/daemon
++ * task priority is set by the configTIMER_TASK_PRIORITY configuration constant.
++ *
++ * Example usage:
++ * @verbatim
++ * // This scenario assumes xBacklightTimer has already been created.  When a
++ * // key is pressed, an LCD back-light is switched on.  If 5 seconds pass
++ * // without a key being pressed, then the LCD back-light is switched off.  In
++ * // this case, the timer is a one-shot timer, and unlike the example given for
++ * // the xTimerReset() function, the key press event handler is an interrupt
++ * // service routine.
++ *
++ * // The callback function assigned to the one-shot timer.  In this case the
++ * // parameter is not used.
++ * void vBacklightTimerCallback( TimerHandle_t pxTimer )
++ * {
++ *     // The timer expired, therefore 5 seconds must have passed since a key
++ *     // was pressed.  Switch off the LCD back-light.
++ *     vSetBacklightState( BACKLIGHT_OFF );
++ * }
++ *
++ * // The key press interrupt service routine.
++ * void vKeyPressEventInterruptHandler( void )
++ * {
++ * BaseType_t xHigherPriorityTaskWoken = pdFALSE;
++ *
++ *     // Ensure the LCD back-light is on, then reset the timer that is
++ *     // responsible for turning the back-light off after 5 seconds of
++ *     // key inactivity.  This is an interrupt service routine so can only
++ *     // call FreeRTOS API functions that end in "FromISR".
++ *     vSetBacklightState( BACKLIGHT_ON );
++ *
++ *     // xTimerStartFromISR() or xTimerResetFromISR() could be called here
++ *     // as both cause the timer to re-calculate its expiry time.
++ *     // xHigherPriorityTaskWoken was initialised to pdFALSE when it was
++ *     // declared (in this function).
++ *     if( xTimerResetFromISR( xBacklightTimer, &xHigherPriorityTaskWoken ) != pdPASS )
++ *     {
++ *         // The reset command was not executed successfully.  Take appropriate
++ *         // action here.
++ *     }
++ *
++ *     // Perform the rest of the key processing here.
++ *
++ *     // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch
++ *     // should be performed.  The syntax required to perform a context switch
++ *     // from inside an ISR varies from port to port, and from compiler to
++ *     // compiler.  Inspect the demos for the port you are using to find the
++ *     // actual syntax required.
++ *     if( xHigherPriorityTaskWoken != pdFALSE )
++ *     {
++ *         // Call the interrupt safe yield function here (actual function
++ *         // depends on the FreeRTOS port being used).
++ *     }
++ * }
++ * @endverbatim
++ */
++#define xTimerResetFromISR( xTimer, pxHigherPriorityTaskWoken ) \
++    xTimerGenericCommand( ( xTimer ), tmrCOMMAND_RESET_FROM_ISR, ( xTaskGetTickCountFromISR() ), ( pxHigherPriorityTaskWoken ), 0U )
++
++/**
++ * const char * const pcTimerGetName( TimerHandle_t xTimer );
++ *
++ * Returns the name that was assigned to a timer when the timer was created.
++ *
++ * @param xTimer The handle of the timer being queried.
++ *
++ * @return The name assigned to the timer specified by the xTimer parameter.
++ */
++const char * pcTimerGetName( TimerHandle_t xTimer );
++
++/**
++ * void vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload );
++ *
++ * Updates a timer to be either an auto-reload timer, in which case the timer
++ * automatically resets itself each time it expires, or a one-shot timer, in
++ * which case the timer will only expire once unless it is manually restarted.
++ *
++ * @param xTimer The handle of the timer being updated.
++ *
++ * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will
++ * expire repeatedly with a frequency set by the timer's period (see the
++ * xTimerPeriodInTicks parameter of the xTimerCreate() API function).  If
++ * uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and
++ * enter the dormant state after it expires.
++ */
++void vTimerSetReloadMode( TimerHandle_t xTimer,
++                          const UBaseType_t uxAutoReload );
++
++/**
++ * UBaseType_t uxTimerGetReloadMode( TimerHandle_t xTimer );
++ *
++ * Queries a timer to determine if it is an auto-reload timer, in which case the timer
++ * automatically resets itself each time it expires, or a one-shot timer, in
++ * which case the timer will only expire once unless it is manually restarted.
++ *
++ * @param xTimer The handle of the timer being queried.
++ *
++ * @return If the timer is an auto-reload timer then pdTRUE is returned, otherwise
++ * pdFALSE is returned.
++ */
++UBaseType_t uxTimerGetReloadMode( TimerHandle_t xTimer );
++
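++/* An illustrative sketch, assuming xTimer was originally created as a
++ * one-shot timer.  vMakeTimerPeriodic() is a hypothetical helper that turns
++ * it into an auto-reload timer and checks the result:
++ *
++ * void vMakeTimerPeriodic( TimerHandle_t xTimer )
++ * {
++ *     // After this call the timer restarts itself each time it expires.
++ *     vTimerSetReloadMode( xTimer, pdTRUE );
++ *
++ *     // uxTimerGetReloadMode() reports the mode that is now in effect.
++ *     configASSERT( uxTimerGetReloadMode( xTimer ) == pdTRUE );
++ * }
++ */
++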
++/**
++ * TickType_t xTimerGetPeriod( TimerHandle_t xTimer );
++ *
++ * Returns the period of a timer.
++ *
++ * @param xTimer The handle of the timer being queried.
++ *
++ * @return The period of the timer in ticks.
++ */
++TickType_t xTimerGetPeriod( TimerHandle_t xTimer );
++
++/**
++ * TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer );
++ *
++ * Returns the time in ticks at which the timer will expire.  If this is less
++ * than the current tick count then the expiry time has overflowed from the
++ * current time.
++ *
++ * @param xTimer The handle of the timer being queried.
++ *
++ * @return If the timer is running then the time in ticks at which the timer
++ * will next expire is returned.  If the timer is not running then the return
++ * value is undefined.
++ */
++TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer );
++
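++/* An illustrative sketch, assuming xTimer is currently active.
++ * xTicksUntilExpiry() is a hypothetical helper that estimates how many ticks
++ * remain before the timer next expires:
++ *
++ * TickType_t xTicksUntilExpiry( TimerHandle_t xTimer )
++ * {
++ *     // TickType_t arithmetic is unsigned and modular, so the subtraction
++ *     // below remains valid when the expiry time has wrapped past the
++ *     // current tick count.
++ *     TickType_t xRemaining = xTimerGetExpiryTime( xTimer ) - xTaskGetTickCount();
++ *
++ *     // For an active timer the result will not normally exceed the value
++ *     // returned by xTimerGetPeriod( xTimer ).
++ *     return xRemaining;
++ * }
++ */
++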
++/*
++ * Functions beyond this part are not part of the public API and are intended
++ * for use by the kernel only.
++ */
++BaseType_t xTimerGenericCommand( TimerHandle_t xTimer,
++                                 const BaseType_t xCommandID,
++                                 const TickType_t xOptionalValue,
++                                 BaseType_t * const pxHigherPriorityTaskWoken,
++                                 const TickType_t xTicksToWait );
++
++/* *INDENT-OFF* */
++#ifdef __cplusplus
++    }
++#endif
++/* *INDENT-ON* */
++#endif /* TIMERS_H */
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/list.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/list.c
+new file mode 100644
+index 0000000000..5eec523162
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/list.c
+@@ -0,0 +1,213 @@
++/*
++ * FreeRTOS Kernel V10.4.3
++ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++
++#include <stdlib.h>
++
++/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
++ * all the API functions to use the MPU wrappers.  That should only be done when
++ * task.h is included from an application file. */
++#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
++
++#include "FreeRTOS.h"
++#include "list.h"
++
++/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
++ * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be
++ * defined for the header files above, but not in this file, in order to
++ * generate the correct privileged Vs unprivileged linkage and placement. */
++#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
++
++/*-----------------------------------------------------------
++* PUBLIC LIST API documented in list.h
++*----------------------------------------------------------*/
++
++void vListInitialise( List_t * const pxList )
++{
++    /* The list structure contains a list item which is used to mark the
++     * end of the list.  To initialise the list the list end is inserted
++     * as the only list entry. */
++    pxList->pxIndex = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM.  This is checked and valid. */
++
++    /* The list end value is the highest possible value in the list to
++     * ensure it remains at the end of the list. */
++    pxList->xListEnd.xItemValue = portMAX_DELAY;
++
++    /* The list end next and previous pointers point to itself so we know
++     * when the list is empty. */
++    pxList->xListEnd.pxNext = ( ListItem_t * ) &( pxList->xListEnd );     /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM.  This is checked and valid. */
++    pxList->xListEnd.pxPrevious = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM.  This is checked and valid. */
++
++    pxList->uxNumberOfItems = ( UBaseType_t ) 0U;
++
++    /* Write known values into the list if
++     * configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
++    listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList );
++    listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList );
++}
++/*-----------------------------------------------------------*/
++
++void vListInitialiseItem( ListItem_t * const pxItem )
++{
++    /* Make sure the list item is not recorded as being on a list. */
++    pxItem->pxContainer = NULL;
++
++    /* Write known values into the list item if
++     * configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
++    listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem );
++    listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem );
++}
++/*-----------------------------------------------------------*/
++
++void vListInsertEnd( List_t * const pxList,
++                     ListItem_t * const pxNewListItem )
++{
++    ListItem_t * const pxIndex = pxList->pxIndex;
++
++    /* Only effective when configASSERT() is also defined, these tests may catch
++     * the list data structures being overwritten in memory.  They will not catch
++     * data errors caused by incorrect configuration or use of FreeRTOS. */
++    listTEST_LIST_INTEGRITY( pxList );
++    listTEST_LIST_ITEM_INTEGRITY( pxNewListItem );
++
++    /* Insert a new list item into pxList, but rather than sort the list,
++     * makes the new list item the last item to be removed by a call to
++     * listGET_OWNER_OF_NEXT_ENTRY(). */
++    pxNewListItem->pxNext = pxIndex;
++    pxNewListItem->pxPrevious = pxIndex->pxPrevious;
++
++    /* Only used during decision coverage testing. */
++    mtCOVERAGE_TEST_DELAY();
++
++    pxIndex->pxPrevious->pxNext = pxNewListItem;
++    pxIndex->pxPrevious = pxNewListItem;
++
++    /* Remember which list the item is in. */
++    pxNewListItem->pxContainer = pxList;
++
++    ( pxList->uxNumberOfItems )++;
++}
++/*-----------------------------------------------------------*/
++
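++/* A minimal usage sketch (illustrative only, not called by the kernel)
++ * showing the initialise-then-insert sequence that the functions in this
++ * file expect.  xExampleList and xExampleItem are hypothetical
++ * application-level objects:
++ *
++ *     List_t xExampleList;
++ *     ListItem_t xExampleItem;
++ *
++ *     vListInitialise( &xExampleList );
++ *     vListInitialiseItem( &xExampleItem );
++ *
++ *     // Give the item a sort value and an owner before placing it on a list.
++ *     listSET_LIST_ITEM_VALUE( &xExampleItem, 1 );
++ *     listSET_LIST_ITEM_OWNER( &xExampleItem, NULL );
++ *
++ *     // Append the item at the end of the list (no sorting by value).
++ *     vListInsertEnd( &xExampleList, &xExampleItem );
++ *     configASSERT( listCURRENT_LIST_LENGTH( &xExampleList ) == 1 );
++ */
++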
++void vListInsert( List_t * const pxList,
++                  ListItem_t * const pxNewListItem )
++{
++    ListItem_t * pxIterator;
++    const TickType_t xValueOfInsertion = pxNewListItem->xItemValue;
++
++    /* Only effective when configASSERT() is also defined, these tests may catch
++     * the list data structures being overwritten in memory.  They will not catch
++     * data errors caused by incorrect configuration or use of FreeRTOS. */
++    listTEST_LIST_INTEGRITY( pxList );
++    listTEST_LIST_ITEM_INTEGRITY( pxNewListItem );
++
++    /* Insert the new list item into the list, sorted in xItemValue order.
++     *
++     * If the list already contains a list item with the same item value then the
++     * new list item should be placed after it.  This ensures that TCBs which are
++     * stored in ready lists (all of which have the same xItemValue value) get a
++     * share of the CPU.  However, if the xItemValue is the same as the back marker
++     * the iteration loop below will not end.  Therefore the value is checked
++     * first, and the algorithm slightly modified if necessary. */
++    if( xValueOfInsertion == portMAX_DELAY )
++    {
++        pxIterator = pxList->xListEnd.pxPrevious;
++    }
++    else
++    {
++        /* *** NOTE ***********************************************************
++        *  If you find your application is crashing here then likely causes are
++        *  listed below.  In addition see https://www.FreeRTOS.org/FAQHelp.html for
++        *  more tips, and ensure configASSERT() is defined!
++        *  https://www.FreeRTOS.org/a00110.html#configASSERT
++        *
++        *   1) Stack overflow -
++        *      see https://www.FreeRTOS.org/Stacks-and-stack-overflow-checking.html
++        *   2) Incorrect interrupt priority assignment, especially on Cortex-M
++        *      parts where numerically high priority values denote low actual
++        *      interrupt priorities, which can seem counter intuitive.  See
++        *      https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html and the definition
++        *      of configMAX_SYSCALL_INTERRUPT_PRIORITY on
++        *      https://www.FreeRTOS.org/a00110.html
++        *   3) Calling an API function from within a critical section or when
++        *      the scheduler is suspended, or calling an API function that does
++        *      not end in "FromISR" from an interrupt.
++        *   4) Using a queue or semaphore before it has been initialised or
++        *      before the scheduler has been started (are interrupts firing
++        *      before vTaskStartScheduler() has been called?).
++        *   5) If the FreeRTOS port supports interrupt nesting then ensure that
++        *      the priority of the tick interrupt is at or below
++        *      configMAX_SYSCALL_INTERRUPT_PRIORITY.
++        **********************************************************************/
++
++        for( pxIterator = ( ListItem_t * ) &( pxList->xListEnd ); pxIterator->pxNext->xItemValue <= xValueOfInsertion; pxIterator = pxIterator->pxNext ) /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM.  This is checked and valid. *//*lint !e440 The iterator moves to a different value, not xValueOfInsertion. */
++        {
++            /* There is nothing to do here, just iterating to the wanted
++             * insertion position. */
++        }
++    }
++
++    pxNewListItem->pxNext = pxIterator->pxNext;
++    pxNewListItem->pxNext->pxPrevious = pxNewListItem;
++    pxNewListItem->pxPrevious = pxIterator;
++    pxIterator->pxNext = pxNewListItem;
++
++    /* Remember which list the item is in.  This allows fast removal of the
++     * item later. */
++    pxNewListItem->pxContainer = pxList;
++
++    ( pxList->uxNumberOfItems )++;
++}
++/*-----------------------------------------------------------*/
++
++UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove )
++{
++/* The list item knows which list it is in.  Obtain the list from the list
++ * item. */
++    List_t * const pxList = pxItemToRemove->pxContainer;
++
++    pxItemToRemove->pxNext->pxPrevious = pxItemToRemove->pxPrevious;
++    pxItemToRemove->pxPrevious->pxNext = pxItemToRemove->pxNext;
++
++    /* Only used during decision coverage testing. */
++    mtCOVERAGE_TEST_DELAY();
++
++    /* Make sure the index is left pointing to a valid item. */
++    if( pxList->pxIndex == pxItemToRemove )
++    {
++        pxList->pxIndex = pxItemToRemove->pxPrevious;
++    }
++    else
++    {
++        mtCOVERAGE_TEST_MARKER();
++    }
++
++    pxItemToRemove->pxContainer = NULL;
++    ( pxList->uxNumberOfItems )--;
++
++    return pxList->uxNumberOfItems;
++}
++/*-----------------------------------------------------------*/
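(A minimal usage sketch, not part of the patch, showing how the list API above is typically driven. It assumes a FreeRTOS-compatible build where list.h and configASSERT() are available; the names vDemoListUsage, xDemoList, xItemA and xItemB are illustrative only.)

    #include "FreeRTOS.h"
    #include "list.h"

    static List_t xDemoList;
    static ListItem_t xItemA, xItemB;

    void vDemoListUsage( void )
    {
        vListInitialise( &xDemoList );
        vListInitialiseItem( &xItemA );
        vListInitialiseItem( &xItemB );

        /* vListInsert() keeps the list sorted by xItemValue, so xItemB
         * (value 10) ends up ahead of xItemA (value 20). */
        listSET_LIST_ITEM_VALUE( &xItemA, 20 );
        listSET_LIST_ITEM_VALUE( &xItemB, 10 );
        vListInsert( &xDemoList, &xItemA );
        vListInsert( &xDemoList, &xItemB );

        /* uxListRemove() returns the number of items remaining in the list. */
        configASSERT( uxListRemove( &xItemB ) == 1 );
    }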
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_1.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_1.c
+new file mode 100644
+index 0000000000..cebc240892
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_1.c
+@@ -0,0 +1,145 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++
++/*
++ * The simplest possible implementation of pvPortMalloc().  Note that this
++ * implementation does NOT allow allocated memory to be freed again.
++ *
++ * See heap_2.c, heap_3.c and heap_4.c for alternative implementations, and the
++ * memory management pages of https://www.FreeRTOS.org for more information.
++ */
++#include <stdlib.h>
++
++#include "FreeRTOS.h"
++#include "task.h"
++
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 0 )
++    #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0
++#endif
++
++/* A few bytes might be lost to byte aligning the heap start address. */
++#define configADJUSTED_HEAP_SIZE    ( configTOTAL_HEAP_SIZE - portBYTE_ALIGNMENT )
++
++/* Allocate the memory for the heap. */
++#if ( configAPPLICATION_ALLOCATED_HEAP == 1 )
++
++/* The application writer has already defined the array used for the RTOS
++* heap - probably so it can be placed in a special segment or address. */
++    extern uint8_t ucHeap[ configTOTAL_HEAP_SIZE ];
++#else
++    static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ];
++#endif /* configAPPLICATION_ALLOCATED_HEAP */
++
++/* Index into the ucHeap array. */
++static size_t xNextFreeByte = ( size_t ) 0;
++
++/*-----------------------------------------------------------*/
++
++void * pvPortMalloc( size_t xWantedSize )
++{
++    void * pvReturn = NULL;
++    static uint8_t * pucAlignedHeap = NULL;
++
++    /* Ensure that blocks are always aligned. */
++    #if ( portBYTE_ALIGNMENT != 1 )
++        {
++            if( xWantedSize & portBYTE_ALIGNMENT_MASK )
++            {
++                /* Byte alignment required. Check for overflow. */
++                if ( (xWantedSize + ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) )) > xWantedSize )
++                {
++                    xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
++                }
++                else
++                {
++                    xWantedSize = 0;
++                }
++            }
++        }
++    #endif
++
++    vTaskSuspendAll();
++    {
++        if( pucAlignedHeap == NULL )
++        {
++            /* Ensure the heap starts on a correctly aligned boundary. */
++            pucAlignedHeap = ( uint8_t * ) ( ( ( portPOINTER_SIZE_TYPE ) & ucHeap[ portBYTE_ALIGNMENT - 1 ] ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );
++        }
++
++        /* Check there is enough room left for the allocation. */
++        if( ( xWantedSize > 0 ) && /* valid size */
++            ( ( xNextFreeByte + xWantedSize ) < configADJUSTED_HEAP_SIZE ) &&
++            ( ( xNextFreeByte + xWantedSize ) > xNextFreeByte ) ) /* Check for overflow. */
++        {
++            /* Return the next free byte then increment the index past this
++             * block. */
++            pvReturn = pucAlignedHeap + xNextFreeByte;
++            xNextFreeByte += xWantedSize;
++        }
++
++    }
++    ( void ) xTaskResumeAll();
++
++    #if ( configUSE_MALLOC_FAILED_HOOK == 1 )
++        {
++            if( pvReturn == NULL )
++            {
++                extern void vApplicationMallocFailedHook( void );
++                vApplicationMallocFailedHook();
++            }
++        }
++    #endif
++
++    return pvReturn;
++}
++/*-----------------------------------------------------------*/
++
++void vPortFree( void * pv )
++{
++    /* Memory cannot be freed using this scheme.  See heap_2.c, heap_3.c and
++     * heap_4.c for alternative implementations, and the memory management pages of
++     * https://www.FreeRTOS.org for more information. */
++    ( void ) pv;
++
++    /* Force an assert as it is invalid to call this function. */
++    configASSERT( pv == NULL );
++}
++/*-----------------------------------------------------------*/
++
++void vPortInitialiseBlocks( void )
++{
++    /* Only required when static memory is not cleared. */
++    xNextFreeByte = ( size_t ) 0;
++}
++/*-----------------------------------------------------------*/
++
++size_t xPortGetFreeHeapSize( void )
++{
++    return( configADJUSTED_HEAP_SIZE - xNextFreeByte );
++}
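(As the header comment of heap_1.c notes, this scheme only ever hands memory out; vPortFree() asserts. A hedged sketch of the intended pattern, allocating once at start-up and never freeing; vDemoHeap1Usage is an illustrative name, and it assumes configTOTAL_HEAP_SIZE is large enough for the request.)

    #include "FreeRTOS.h"
    #include "task.h"

    void vDemoHeap1Usage( void )
    {
        /* One-off allocation for a buffer that lives for the whole
         * application lifetime; heap_1 cannot return it to the heap. */
        uint8_t * pucBuffer = pvPortMalloc( 128 );

        configASSERT( pucBuffer != NULL );

        /* Remaining space in the static ucHeap array can still be queried. */
        ( void ) xPortGetFreeHeapSize();
    }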
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_2.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_2.c
+new file mode 100644
+index 0000000000..00a68b26b4
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_2.c
+@@ -0,0 +1,277 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++/*
++ * A sample implementation of pvPortMalloc() and vPortFree() that permits
++ * allocated blocks to be freed, but does not combine adjacent free blocks
++ * into a single larger block (and so will fragment memory).  See heap_4.c for
++ * an equivalent that does combine adjacent blocks into single larger blocks.
++ *
++ * See heap_1.c, heap_3.c and heap_4.c for alternative implementations, and the
++ * memory management pages of https://www.FreeRTOS.org for more information.
++ */
++#include <stdlib.h>
++
++#include "FreeRTOS.h"
++#include "task.h"
++
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 0 )
++    #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0
++#endif
++
++/* A few bytes might be lost to byte aligning the heap start address. */
++#define configADJUSTED_HEAP_SIZE    ( configTOTAL_HEAP_SIZE - portBYTE_ALIGNMENT )
++
++/*
++ * Initialises the heap structures before their first use.
++ */
++static void prvHeapInit( void );
++
++/* Allocate the memory for the heap. */
++#if ( configAPPLICATION_ALLOCATED_HEAP == 1 )
++
++/* The application writer has already defined the array used for the RTOS
++* heap - probably so it can be placed in a special segment or address. */
++    extern uint8_t ucHeap[ configTOTAL_HEAP_SIZE ];
++#else
++    static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ];
++#endif /* configAPPLICATION_ALLOCATED_HEAP */
++
++
++/* Define the linked list structure.  This is used to link free blocks in order
++ * of their size. */
++typedef struct A_BLOCK_LINK
++{
++    struct A_BLOCK_LINK * pxNextFreeBlock; /*<< The next free block in the list. */
++    size_t xBlockSize;                     /*<< The size of the free block. */
++} BlockLink_t;
++
++
++static const uint16_t heapSTRUCT_SIZE = ( ( sizeof( BlockLink_t ) + ( portBYTE_ALIGNMENT - 1 ) ) & ~portBYTE_ALIGNMENT_MASK );
++#define heapMINIMUM_BLOCK_SIZE    ( ( size_t ) ( heapSTRUCT_SIZE * 2 ) )
++
++/* Create a couple of list links to mark the start and end of the list. */
++static BlockLink_t xStart, xEnd;
++
++/* Keeps track of the number of free bytes remaining, but says nothing about
++ * fragmentation. */
++static size_t xFreeBytesRemaining = configADJUSTED_HEAP_SIZE;
++
++/* STATIC FUNCTIONS ARE DEFINED AS MACROS TO MINIMIZE THE FUNCTION CALL DEPTH. */
++
++/*
++ * Insert a block into the list of free blocks - which is ordered by size of
++ * the block.  Small blocks at the start of the list and large blocks at the end
++ * of the list.
++ */
++#define prvInsertBlockIntoFreeList( pxBlockToInsert )                                                                               \
++    {                                                                                                                               \
++        BlockLink_t * pxIterator;                                                                                                   \
++        size_t xBlockSize;                                                                                                          \
++                                                                                                                                    \
++        xBlockSize = pxBlockToInsert->xBlockSize;                                                                                   \
++                                                                                                                                    \
++        /* Iterate through the list until a block is found that has a larger size */                                                \
++        /* than the block we are inserting. */                                                                                      \
++        for( pxIterator = &xStart; pxIterator->pxNextFreeBlock->xBlockSize < xBlockSize; pxIterator = pxIterator->pxNextFreeBlock ) \
++        {                                                                                                                           \
++            /* There is nothing to do here - just iterate to the correct position. */                                               \
++        }                                                                                                                           \
++                                                                                                                                    \
++        /* Update the list to include the block being inserted in the correct */                                                    \
++        /* position. */                                                                                                             \
++        pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;                                                             \
++        pxIterator->pxNextFreeBlock = pxBlockToInsert;                                                                              \
++    }
++/*-----------------------------------------------------------*/
++
++void * pvPortMalloc( size_t xWantedSize )
++{
++    BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink;
++    static BaseType_t xHeapHasBeenInitialised = pdFALSE;
++    void * pvReturn = NULL;
++
++    vTaskSuspendAll();
++    {
++        /* If this is the first call to malloc then the heap will require
++         * initialisation to setup the list of free blocks. */
++        if( xHeapHasBeenInitialised == pdFALSE )
++        {
++            prvHeapInit();
++            xHeapHasBeenInitialised = pdTRUE;
++        }
++
++        /* The wanted size must be increased so it can contain a BlockLink_t
++         * structure in addition to the requested amount of bytes. */
++        if( ( xWantedSize > 0 ) &&
++            ( ( xWantedSize + heapSTRUCT_SIZE ) >  xWantedSize ) ) /* Overflow check */
++        {
++            xWantedSize += heapSTRUCT_SIZE;
++
++            /* Byte alignment required. Check for overflow. */
++            if( ( xWantedSize + ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ) )
++                    > xWantedSize )
++            {
++                xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
++                configASSERT( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) == 0 );
++            }
++            else
++            {
++                xWantedSize = 0;
++            }
++        }
++        else
++        {
++            xWantedSize = 0;
++        }
++
++
++        if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
++        {
++            /* Blocks are stored in size order - traverse the list from the start
++             * (smallest) block until one of adequate size is found. */
++            pxPreviousBlock = &xStart;
++            pxBlock = xStart.pxNextFreeBlock;
++
++            while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
++            {
++                pxPreviousBlock = pxBlock;
++                pxBlock = pxBlock->pxNextFreeBlock;
++            }
++
++            /* If we found the end marker then a block of adequate size was not found. */
++            if( pxBlock != &xEnd )
++            {
++                /* Return the memory space - jumping over the BlockLink_t structure
++                 * at its start. */
++                pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + heapSTRUCT_SIZE );
++
++                /* This block is being returned for use so must be taken out of the
++                 * list of free blocks. */
++                pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
++
++                /* If the block is larger than required it can be split into two. */
++                if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE )
++                {
++                    /* This block is to be split into two.  Create a new block
++                     * following the number of bytes requested. The void cast is
++                     * used to prevent byte alignment warnings from the compiler. */
++                    pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
++
++                    /* Calculate the sizes of two blocks split from the single
++                     * block. */
++                    pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
++                    pxBlock->xBlockSize = xWantedSize;
++
++                    /* Insert the new block into the list of free blocks. */
++                    prvInsertBlockIntoFreeList( ( pxNewBlockLink ) );
++                }
++
++                xFreeBytesRemaining -= pxBlock->xBlockSize;
++            }
++        }
++
++    }
++    ( void ) xTaskResumeAll();
++
++    #if ( configUSE_MALLOC_FAILED_HOOK == 1 )
++        {
++            if( pvReturn == NULL )
++            {
++                extern void vApplicationMallocFailedHook( void );
++                vApplicationMallocFailedHook();
++            }
++        }
++    #endif
++
++    return pvReturn;
++}
++/*-----------------------------------------------------------*/
++
++void vPortFree( void * pv )
++{
++    uint8_t * puc = ( uint8_t * ) pv;
++    BlockLink_t * pxLink;
++
++    if( pv != NULL )
++    {
++        /* The memory being freed will have a BlockLink_t structure immediately
++         * before it. */
++        puc -= heapSTRUCT_SIZE;
++
++        /* This unexpected casting is to keep some compilers from issuing
++         * byte alignment warnings. */
++        pxLink = ( void * ) puc;
++
++        vTaskSuspendAll();
++        {
++            /* Add this block to the list of free blocks. */
++            prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
++            xFreeBytesRemaining += pxLink->xBlockSize;
++        }
++        ( void ) xTaskResumeAll();
++    }
++}
++/*-----------------------------------------------------------*/
++
++size_t xPortGetFreeHeapSize( void )
++{
++    return xFreeBytesRemaining;
++}
++/*-----------------------------------------------------------*/
++
++void vPortInitialiseBlocks( void )
++{
++    /* This just exists to keep the linker quiet. */
++}
++/*-----------------------------------------------------------*/
++
++static void prvHeapInit( void )
++{
++    BlockLink_t * pxFirstFreeBlock;
++    uint8_t * pucAlignedHeap;
++
++    /* Ensure the heap starts on a correctly aligned boundary. */
++    pucAlignedHeap = ( uint8_t * ) ( ( ( portPOINTER_SIZE_TYPE ) & ucHeap[ portBYTE_ALIGNMENT - 1 ] ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );
++
++    /* xStart is used to hold a pointer to the first item in the list of free
++     * blocks.  The void cast is used to prevent compiler warnings. */
++    xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap;
++    xStart.xBlockSize = ( size_t ) 0;
++
++    /* xEnd is used to mark the end of the list of free blocks. */
++    xEnd.xBlockSize = configADJUSTED_HEAP_SIZE;
++    xEnd.pxNextFreeBlock = NULL;
++
++    /* To start with there is a single free block that is sized to take up the
++     * entire heap space. */
++    pxFirstFreeBlock = ( void * ) pucAlignedHeap;
++    pxFirstFreeBlock->xBlockSize = configADJUSTED_HEAP_SIZE;
++    pxFirstFreeBlock->pxNextFreeBlock = &xEnd;
++}
++/*-----------------------------------------------------------*/
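(Because heap_2 frees blocks but never merges neighbours, it behaves best when allocations come in a few repeated sizes, so freed blocks are reused exactly. A hedged sketch of that pattern; Message_t and vDemoHeap2Usage are illustrative names only.)

    #include "FreeRTOS.h"
    #include "task.h"

    typedef struct { uint8_t ucPayload[ 64 ]; } Message_t;

    void vDemoHeap2Usage( void )
    {
        /* Allocating and freeing same-sized blocks means a freed block can be
         * handed out again unchanged, so the lack of coalescing does not
         * fragment the heap. */
        Message_t * pxMsg = pvPortMalloc( sizeof( Message_t ) );

        if( pxMsg != NULL )
        {
            /* ... fill in and consume the message ... */
            vPortFree( pxMsg );
        }
    }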
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_3.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_3.c
+new file mode 100644
+index 0000000000..7cdd9bb18a
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_3.c
+@@ -0,0 +1,78 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++
++/*
++ * Implementation of pvPortMalloc() and vPortFree() that relies on the
++ * compiler's own malloc() and free() implementations.
++ *
++ * This file can only be used if the linker is configured to generate
++ * a heap memory area.
++ *
++ * See heap_1.c, heap_2.c and heap_4.c for alternative implementations, and the
++ * memory management pages of https://www.FreeRTOS.org for more information.
++ */
++
++#include <stdlib.h>
++
++#include "FreeRTOS.h"
++#include "task.h"
++
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 0 )
++    #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0
++#endif
++
++/*-----------------------------------------------------------*/
++
++void * pvPortMalloc( size_t xWantedSize )
++{
++    void * pvReturn;
++
++    pvReturn = RT_KERNEL_MALLOC( xWantedSize );
++
++    #if ( configUSE_MALLOC_FAILED_HOOK == 1 )
++        {
++            if( pvReturn == NULL )
++            {
++                extern void vApplicationMallocFailedHook( void );
++                vApplicationMallocFailedHook();
++            }
++        }
++    #endif
++
++    return pvReturn;
++}
++/*-----------------------------------------------------------*/
++
++void vPortFree( void * pv )
++{
++    if( pv )
++    {
++        RT_KERNEL_FREE( pv );
++    }
++}
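(Unlike the stock heap_3, which wraps the C library's malloc()/free(), this version routes pvPortMalloc()/vPortFree() to RT_KERNEL_MALLOC()/RT_KERNEL_FREE(), so code written against the FreeRTOS API and native RT-Thread code draw from the same kernel heap. A hedged sketch, assuming RT_KERNEL_MALLOC() maps to rt_malloc() as in a default RT-Thread build; vDemoHeap3Usage is an illustrative name.)

    #include <rtthread.h>
    #include "FreeRTOS.h"

    void vDemoHeap3Usage( void )
    {
        /* Both allocations come from the RT-Thread kernel heap. */
        void * pvFromFreeRTOS = pvPortMalloc( 64 );
        void * pvFromRTThread = rt_malloc( 64 );

        vPortFree( pvFromFreeRTOS );   /* forwards to RT_KERNEL_FREE() */
        rt_free( pvFromRTThread );
    }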
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_4.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_4.c
+new file mode 100644
+index 0000000000..53536b7025
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_4.c
+@@ -0,0 +1,447 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++/*
++ * A sample implementation of pvPortMalloc() and vPortFree() that combines
++ * (coalesces) adjacent memory blocks as they are freed, and in so doing
++ * limits memory fragmentation.
++ *
++ * See heap_1.c, heap_2.c and heap_3.c for alternative implementations, and the
++ * memory management pages of https://www.FreeRTOS.org for more information.
++ */
++#include <stdlib.h>
++
++#include "FreeRTOS.h"
++#include "task.h"
++
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 0 )
++    #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0
++#endif
++
++/* Block sizes must not get too small. */
++#define heapMINIMUM_BLOCK_SIZE    ( ( size_t ) ( xHeapStructSize << 1 ) )
++
++/* Assumes 8bit bytes! */
++#define heapBITS_PER_BYTE         ( ( size_t ) 8 )
++
++/* Allocate the memory for the heap. */
++#if ( configAPPLICATION_ALLOCATED_HEAP == 1 )
++
++/* The application writer has already defined the array used for the RTOS
++* heap - probably so it can be placed in a special segment or address. */
++    extern uint8_t ucHeap[ configTOTAL_HEAP_SIZE ];
++#else
++    static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ];
++#endif /* configAPPLICATION_ALLOCATED_HEAP */
++
++/* Define the linked list structure.  This is used to link free blocks in order
++ * of their memory address. */
++typedef struct A_BLOCK_LINK
++{
++    struct A_BLOCK_LINK * pxNextFreeBlock; /*<< The next free block in the list. */
++    size_t xBlockSize;                     /*<< The size of the free block. */
++} BlockLink_t;
++
++/*-----------------------------------------------------------*/
++
++/*
++ * Inserts a block of memory that is being freed into the correct position in
++ * the list of free memory blocks.  The block being freed will be merged with
++ * the block in front it and/or the block behind it if the memory blocks are
++ * adjacent to each other.
++ */
++static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert );
++
++/*
++ * Called automatically to setup the required heap structures the first time
++ * pvPortMalloc() is called.
++ */
++static void prvHeapInit( void );
++
++/*-----------------------------------------------------------*/
++
++/* The size of the structure placed at the beginning of each allocated memory
++ * block must by correctly byte aligned. */
++static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK );
++
++/* Create a couple of list links to mark the start and end of the list. */
++static BlockLink_t xStart, * pxEnd = NULL;
++
++/* Keeps track of the number of calls to allocate and free memory as well as the
++ * number of free bytes remaining, but says nothing about fragmentation. */
++static size_t xFreeBytesRemaining = 0U;
++static size_t xMinimumEverFreeBytesRemaining = 0U;
++static size_t xNumberOfSuccessfulAllocations = 0;
++static size_t xNumberOfSuccessfulFrees = 0;
++
++/* Gets set to the top bit of a size_t type.  When this bit in the xBlockSize
++ * member of a BlockLink_t structure is set then the block belongs to the
++ * application.  When the bit is free the block is still part of the free heap
++ * space. */
++static size_t xBlockAllocatedBit = 0;
++
++/*-----------------------------------------------------------*/
++
++void * pvPortMalloc( size_t xWantedSize )
++{
++    BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink;
++    void * pvReturn = NULL;
++
++    vTaskSuspendAll();
++    {
++        /* If this is the first call to malloc then the heap will require
++         * initialisation to setup the list of free blocks. */
++        if( pxEnd == NULL )
++        {
++            prvHeapInit();
++        }
++
++        /* Check the requested block size is not so large that the top bit is
++         * set.  The top bit of the block size member of the BlockLink_t structure
++         * is used to determine who owns the block - the application or the
++         * kernel, so it must be free. */
++        if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
++        {
++            /* The wanted size must be increased so it can contain a BlockLink_t
++             * structure in addition to the requested amount of bytes. */
++            if( ( xWantedSize > 0 ) &&
++                ( ( xWantedSize + xHeapStructSize ) >  xWantedSize ) ) /* Overflow check */
++            {
++                xWantedSize += xHeapStructSize;
++
++                /* Ensure that blocks are always aligned. */
++                if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
++                {
++                    /* Byte alignment required. Check for overflow. */
++                    if( ( xWantedSize + ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ) )
++                            > xWantedSize )
++                    {
++                        xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
++                        configASSERT( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) == 0 );
++                    }
++                    else
++                    {
++                        xWantedSize = 0;
++                    }
++                }
++            }
++            else
++            {
++                xWantedSize = 0;
++            }
++
++            if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
++            {
++                /* Traverse the list from the start (lowest address) block until
++                 * one of adequate size is found. */
++                pxPreviousBlock = &xStart;
++                pxBlock = xStart.pxNextFreeBlock;
++
++                while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
++                {
++                    pxPreviousBlock = pxBlock;
++                    pxBlock = pxBlock->pxNextFreeBlock;
++                }
++
++                /* If the end marker was reached then a block of adequate size
++                 * was not found. */
++                if( pxBlock != pxEnd )
++                {
++                    /* Return the memory space pointed to - jumping over the
++                     * BlockLink_t structure at its start. */
++                    pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize );
++
++                    /* This block is being returned for use so must be taken out
++                     * of the list of free blocks. */
++                    pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
++
++                    /* If the block is larger than required it can be split into
++                     * two. */
++                    if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE )
++                    {
++                        /* This block is to be split into two.  Create a new
++                         * block following the number of bytes requested. The void
++                         * cast is used to prevent byte alignment warnings from the
++                         * compiler. */
++                        pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
++                        configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
++
++                        /* Calculate the sizes of two blocks split from the
++                         * single block. */
++                        pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
++                        pxBlock->xBlockSize = xWantedSize;
++
++                        /* Insert the new block into the list of free blocks. */
++                        prvInsertBlockIntoFreeList( pxNewBlockLink );
++                    }
++
++                    xFreeBytesRemaining -= pxBlock->xBlockSize;
++
++                    if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
++                    {
++                        xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
++                    }
++
++                    /* The block is being returned - it is allocated and owned
++                     * by the application and has no "next" block. */
++                    pxBlock->xBlockSize |= xBlockAllocatedBit;
++                    pxBlock->pxNextFreeBlock = NULL;
++                    xNumberOfSuccessfulAllocations++;
++                }
++            }
++        }
++
++    }
++    ( void ) xTaskResumeAll();
++
++    #if ( configUSE_MALLOC_FAILED_HOOK == 1 )
++        {
++            if( pvReturn == NULL )
++            {
++                extern void vApplicationMallocFailedHook( void );
++                vApplicationMallocFailedHook();
++            }
++        }
++    #endif /* if ( configUSE_MALLOC_FAILED_HOOK == 1 ) */
++
++    configASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 );
++    return pvReturn;
++}
++/*-----------------------------------------------------------*/
++
++void vPortFree( void * pv )
++{
++    uint8_t * puc = ( uint8_t * ) pv;
++    BlockLink_t * pxLink;
++
++    if( pv != NULL )
++    {
++        /* The memory being freed will have a BlockLink_t structure immediately
++         * before it. */
++        puc -= xHeapStructSize;
++
++        /* This casting is to keep the compiler from issuing warnings. */
++        pxLink = ( void * ) puc;
++
++        /* Check the block is actually allocated. */
++        configASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 );
++        configASSERT( pxLink->pxNextFreeBlock == NULL );
++
++        if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 )
++        {
++            if( pxLink->pxNextFreeBlock == NULL )
++            {
++                /* The block is being returned to the heap - it is no longer
++                 * allocated. */
++                pxLink->xBlockSize &= ~xBlockAllocatedBit;
++
++                vTaskSuspendAll();
++                {
++                    /* Add this block to the list of free blocks. */
++                    xFreeBytesRemaining += pxLink->xBlockSize;
++                    prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
++                    xNumberOfSuccessfulFrees++;
++                }
++                ( void ) xTaskResumeAll();
++            }
++        }
++    }
++}
++/*-----------------------------------------------------------*/
++
++size_t xPortGetFreeHeapSize( void )
++{
++    return xFreeBytesRemaining;
++}
++/*-----------------------------------------------------------*/
++
++size_t xPortGetMinimumEverFreeHeapSize( void )
++{
++    return xMinimumEverFreeBytesRemaining;
++}
++/*-----------------------------------------------------------*/
++
++void vPortInitialiseBlocks( void )
++{
++    /* This just exists to keep the linker quiet. */
++}
++/*-----------------------------------------------------------*/
++
++static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */
++{
++    BlockLink_t * pxFirstFreeBlock;
++    uint8_t * pucAlignedHeap;
++    size_t uxAddress;
++    size_t xTotalHeapSize = configTOTAL_HEAP_SIZE;
++
++    /* Ensure the heap starts on a correctly aligned boundary. */
++    uxAddress = ( size_t ) ucHeap;
++
++    if( ( uxAddress & portBYTE_ALIGNMENT_MASK ) != 0 )
++    {
++        uxAddress += ( portBYTE_ALIGNMENT - 1 );
++        uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK );
++        xTotalHeapSize -= uxAddress - ( size_t ) ucHeap;
++    }
++
++    pucAlignedHeap = ( uint8_t * ) uxAddress;
++
++    /* xStart is used to hold a pointer to the first item in the list of free
++     * blocks.  The void cast is used to prevent compiler warnings. */
++    xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap;
++    xStart.xBlockSize = ( size_t ) 0;
++
++    /* pxEnd is used to mark the end of the list of free blocks and is inserted
++     * at the end of the heap space. */
++    uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize;
++    uxAddress -= xHeapStructSize;
++    uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK );
++    pxEnd = ( void * ) uxAddress;
++    pxEnd->xBlockSize = 0;
++    pxEnd->pxNextFreeBlock = NULL;
++
++    /* To start with there is a single free block that is sized to take up the
++     * entire heap space, minus the space taken by pxEnd. */
++    pxFirstFreeBlock = ( void * ) pucAlignedHeap;
++    pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock;
++    pxFirstFreeBlock->pxNextFreeBlock = pxEnd;
++
++    /* Only one block exists - and it covers the entire usable heap space. */
++    xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
++    xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
++
++    /* Work out the position of the top bit in a size_t variable. */
++    xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 );
++}
++/*-----------------------------------------------------------*/
++
++static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) /* PRIVILEGED_FUNCTION */
++{
++    BlockLink_t * pxIterator;
++    uint8_t * puc;
++
++    /* Iterate through the list until a block is found that has a higher address
++     * than the block being inserted. */
++    for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock )
++    {
++        /* Nothing to do here, just iterate to the right position. */
++    }
++
++    /* Do the block being inserted, and the block it is being inserted after
++     * make a contiguous block of memory? */
++    puc = ( uint8_t * ) pxIterator;
++
++    if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
++    {
++        pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
++        pxBlockToInsert = pxIterator;
++    }
++
++    /* Do the block being inserted, and the block it is being inserted before
++     * make a contiguous block of memory? */
++    puc = ( uint8_t * ) pxBlockToInsert;
++
++    if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock )
++    {
++        if( pxIterator->pxNextFreeBlock != pxEnd )
++        {
++            /* Form one big block from the two blocks. */
++            pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
++            pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
++        }
++        else
++        {
++            pxBlockToInsert->pxNextFreeBlock = pxEnd;
++        }
++    }
++    else
++    {
++        pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
++    }
++
++    /* If the block being inserted plugged a gap, and so was merged with the block
++     * before and the block after, then its pxNextFreeBlock pointer will have
++     * already been set, and should not be set here as that would make it point
++     * to itself. */
++    if( pxIterator != pxBlockToInsert )
++    {
++        pxIterator->pxNextFreeBlock = pxBlockToInsert;
++    }
++}
++/*-----------------------------------------------------------*/
++
++void vPortGetHeapStats( HeapStats_t * pxHeapStats )
++{
++    BlockLink_t * pxBlock;
++    size_t xBlocks = 0, xMaxSize = 0, xMinSize = portMAX_DELAY; /* portMAX_DELAY used as a portable way of getting the maximum value. */
++
++    vTaskSuspendAll();
++    {
++        pxBlock = xStart.pxNextFreeBlock;
++
++        /* pxBlock will be NULL if the heap has not been initialised.  The heap
++         * is initialised automatically when the first allocation is made. */
++        if( pxBlock != NULL )
++        {
++            do
++            {
++                /* Increment the number of blocks and record the largest block seen
++                 * so far. */
++                xBlocks++;
++
++                if( pxBlock->xBlockSize > xMaxSize )
++                {
++                    xMaxSize = pxBlock->xBlockSize;
++                }
++
++                if( pxBlock->xBlockSize < xMinSize )
++                {
++                    xMinSize = pxBlock->xBlockSize;
++                }
++
++                /* Move to the next block in the chain until the last block is
++                 * reached. */
++                pxBlock = pxBlock->pxNextFreeBlock;
++            } while( pxBlock != pxEnd );
++        }
++    }
++    ( void ) xTaskResumeAll();
++
++    pxHeapStats->xSizeOfLargestFreeBlockInBytes = xMaxSize;
++    pxHeapStats->xSizeOfSmallestFreeBlockInBytes = xMinSize;
++    pxHeapStats->xNumberOfFreeBlocks = xBlocks;
++
++    taskENTER_CRITICAL();
++    {
++        pxHeapStats->xAvailableHeapSpaceInBytes = xFreeBytesRemaining;
++        pxHeapStats->xNumberOfSuccessfulAllocations = xNumberOfSuccessfulAllocations;
++        pxHeapStats->xNumberOfSuccessfulFrees = xNumberOfSuccessfulFrees;
++        pxHeapStats->xMinimumEverFreeBytesRemaining = xMinimumEverFreeBytesRemaining;
++    }
++    taskEXIT_CRITICAL();
++}
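(heap_4 also keeps allocation counters, so the vPortGetHeapStats() routine above can be used to watch free space and fragmentation at run time. A minimal sketch; vDemoHeap4Stats is an illustrative name, and the HeapStats_t fields are the ones filled in by the function above.)

    #include "FreeRTOS.h"
    #include "task.h"

    void vDemoHeap4Stats( void )
    {
        HeapStats_t xStats;
        void * pv = pvPortMalloc( 32 );

        vPortFree( pv );
        vPortGetHeapStats( &xStats );

        /* Inspect the snapshot taken under the scheduler lock above. */
        ( void ) xStats.xAvailableHeapSpaceInBytes;
        ( void ) xStats.xSizeOfLargestFreeBlockInBytes;
        ( void ) xStats.xNumberOfSuccessfulAllocations;
    }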
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_5.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_5.c
+new file mode 100644
+index 0000000000..7fadbd4b0f
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/MemMang/heap_5.c
+@@ -0,0 +1,506 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++/*
++ * A sample implementation of pvPortMalloc() that allows the heap to be defined
++ * across multiple non-contiguous blocks and combines (coalesces) adjacent
++ * memory blocks as they are freed.
++ *
++ * See heap_1.c, heap_2.c, heap_3.c and heap_4.c for alternative
++ * implementations, and the memory management pages of https://www.FreeRTOS.org
++ * for more information.
++ *
++ * Usage notes:
++ *
++ * vPortDefineHeapRegions() ***must*** be called before pvPortMalloc().
++ * pvPortMalloc() will be called if any task objects (tasks, queues, event
++ * groups, etc.) are created, therefore vPortDefineHeapRegions() ***must*** be
++ * called before any other objects are defined.
++ *
++ * vPortDefineHeapRegions() takes a single parameter.  The parameter is an array
++ * of HeapRegion_t structures.  HeapRegion_t is defined in portable.h as
++ *
++ * typedef struct HeapRegion
++ * {
++ *  uint8_t *pucStartAddress; << Start address of a block of memory that will be part of the heap.
++ *  size_t xSizeInBytes;      << Size of the block of memory.
++ * } HeapRegion_t;
++ *
++ * The array is terminated using a NULL zero sized region definition, and the
++ * memory regions defined in the array ***must*** appear in address order from
++ * low address to high address.  So the following is a valid example of how
++ * to use the function.
++ *
++ * HeapRegion_t xHeapRegions[] =
++ * {
++ *  { ( uint8_t * ) 0x80000000UL, 0x10000 }, << Defines a block of 0x10000 bytes starting at address 0x80000000
++ *  { ( uint8_t * ) 0x90000000UL, 0xa0000 }, << Defines a block of 0xa0000 bytes starting at address of 0x90000000
++ *  { NULL, 0 }                << Terminates the array.
++ * };
++ *
++ * vPortDefineHeapRegions( xHeapRegions ); << Pass the array into vPortDefineHeapRegions().
++ *
++ * Note 0x80000000 is the lower address so appears in the array first.
++ *
++ */
++#include <stdlib.h>
++
++#include "FreeRTOS.h"
++#include "task.h"
++
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 0 )
++    #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0
++#endif
++
++/* Block sizes must not get too small. */
++#define heapMINIMUM_BLOCK_SIZE    ( ( size_t ) ( xHeapStructSize << 1 ) )
++
++/* Assumes 8bit bytes! */
++#define heapBITS_PER_BYTE         ( ( size_t ) 8 )
++
++/* Define the linked list structure.  This is used to link free blocks in order
++ * of their memory address. */
++typedef struct A_BLOCK_LINK
++{
++    struct A_BLOCK_LINK * pxNextFreeBlock; /*<< The next free block in the list. */
++    size_t xBlockSize;                     /*<< The size of the free block. */
++} BlockLink_t;
++
++/*-----------------------------------------------------------*/
++
++/*
++ * Inserts a block of memory that is being freed into the correct position in
++ * the list of free memory blocks.  The block being freed will be merged with
++ * the block in front it and/or the block behind it if the memory blocks are
++ * adjacent to each other.
++ */
++static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert );
++
++/*-----------------------------------------------------------*/
++
++/* The size of the structure placed at the beginning of each allocated memory
++ * block must by correctly byte aligned. */
++static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK );
++
++/* Create a couple of list links to mark the start and end of the list. */
++static BlockLink_t xStart, * pxEnd = NULL;
++
++/* Keeps track of the number of calls to allocate and free memory as well as the
++ * number of free bytes remaining, but says nothing about fragmentation. */
++static size_t xFreeBytesRemaining = 0U;
++static size_t xMinimumEverFreeBytesRemaining = 0U;
++static size_t xNumberOfSuccessfulAllocations = 0;
++static size_t xNumberOfSuccessfulFrees = 0;
++
++/* Gets set to the top bit of a size_t type.  When this bit in the xBlockSize
++ * member of a BlockLink_t structure is set then the block belongs to the
++ * application.  When the bit is free the block is still part of the free heap
++ * space. */
++static size_t xBlockAllocatedBit = 0;
++
++/*-----------------------------------------------------------*/
++
++void * pvPortMalloc( size_t xWantedSize )
++{
++    BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink;
++    void * pvReturn = NULL;
++
++    /* The heap must be initialised before the first call to
++     * pvPortMalloc(). */
++    configASSERT( pxEnd );
++
++    vTaskSuspendAll();
++    {
++        /* Check the requested block size is not so large that the top bit is
++         * set.  The top bit of the block size member of the BlockLink_t structure
++         * is used to determine who owns the block - the application or the
++         * kernel, so it must be free. */
++        if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
++        {
++            /* The wanted size is increased so it can contain a BlockLink_t
++             * structure in addition to the requested amount of bytes. */
++            if( ( xWantedSize > 0 ) &&
++                ( ( xWantedSize + xHeapStructSize ) >  xWantedSize ) ) /* Overflow check */
++            {
++                xWantedSize += xHeapStructSize;
++
++                /* Ensure that blocks are always aligned */
++                if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
++                {
++                    /* Byte alignment required. Check for overflow */
++                    if( ( xWantedSize + ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ) ) >
++                         xWantedSize )
++                    {
++                        xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
++                    }
++                    else
++                    {
++                        xWantedSize = 0;
++                    }
++                }
++            }
++            else
++            {
++                xWantedSize = 0;
++            }
++
++            if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
++            {
++                /* Traverse the list from the start (lowest address) block until
++                 * one of adequate size is found. */
++                pxPreviousBlock = &xStart;
++                pxBlock = xStart.pxNextFreeBlock;
++
++                while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
++                {
++                    pxPreviousBlock = pxBlock;
++                    pxBlock = pxBlock->pxNextFreeBlock;
++                }
++
++                /* If the end marker was reached then a block of adequate size
++                 * was not found. */
++                if( pxBlock != pxEnd )
++                {
++                    /* Return the memory space pointed to - jumping over the
++                     * BlockLink_t structure at its start. */
++                    pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize );
++
++                    /* This block is being returned for use so must be taken out
++                     * of the list of free blocks. */
++                    pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
++
++                    /* If the block is larger than required it can be split into
++                     * two. */
++                    if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE )
++                    {
++                        /* This block is to be split into two.  Create a new
++                         * block following the number of bytes requested. The void
++                         * cast is used to prevent byte alignment warnings from the
++                         * compiler. */
++                        pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
++
++                        /* Calculate the sizes of two blocks split from the
++                         * single block. */
++                        pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
++                        pxBlock->xBlockSize = xWantedSize;
++
++                        /* Insert the new block into the list of free blocks. */
++                        prvInsertBlockIntoFreeList( ( pxNewBlockLink ) );
++                    }
++
++                    xFreeBytesRemaining -= pxBlock->xBlockSize;
++
++                    if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
++                    {
++                        xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
++                    }
++
++                    /* The block is being returned - it is allocated and owned
++                     * by the application and has no "next" block. */
++                    pxBlock->xBlockSize |= xBlockAllocatedBit;
++                    pxBlock->pxNextFreeBlock = NULL;
++                    xNumberOfSuccessfulAllocations++;
++                }
++            }
++        }
++
++    }
++    ( void ) xTaskResumeAll();
++
++    #if ( configUSE_MALLOC_FAILED_HOOK == 1 )
++        {
++            if( pvReturn == NULL )
++            {
++                extern void vApplicationMallocFailedHook( void );
++                vApplicationMallocFailedHook();
++            }
++        }
++    #endif /* if ( configUSE_MALLOC_FAILED_HOOK == 1 ) */
++
++    return pvReturn;
++}
++/*-----------------------------------------------------------*/
++
++void vPortFree( void * pv )
++{
++    uint8_t * puc = ( uint8_t * ) pv;
++    BlockLink_t * pxLink;
++
++    if( pv != NULL )
++    {
++        /* The memory being freed will have a BlockLink_t structure immediately
++         * before it. */
++        puc -= xHeapStructSize;
++
++        /* This casting is to keep the compiler from issuing warnings. */
++        pxLink = ( void * ) puc;
++
++        /* Check the block is actually allocated. */
++        configASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 );
++        configASSERT( pxLink->pxNextFreeBlock == NULL );
++
++        if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 )
++        {
++            if( pxLink->pxNextFreeBlock == NULL )
++            {
++                /* The block is being returned to the heap - it is no longer
++                 * allocated. */
++                pxLink->xBlockSize &= ~xBlockAllocatedBit;
++
++                vTaskSuspendAll();
++                {
++                    /* Add this block to the list of free blocks. */
++                    xFreeBytesRemaining += pxLink->xBlockSize;
++                    prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
++                    xNumberOfSuccessfulFrees++;
++                }
++                ( void ) xTaskResumeAll();
++            }
++        }
++    }
++}
++/*-----------------------------------------------------------*/
++
++size_t xPortGetFreeHeapSize( void )
++{
++    return xFreeBytesRemaining;
++}
++/*-----------------------------------------------------------*/
++
++size_t xPortGetMinimumEverFreeHeapSize( void )
++{
++    return xMinimumEverFreeBytesRemaining;
++}
++/*-----------------------------------------------------------*/
++
++static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert )
++{
++    BlockLink_t * pxIterator;
++    uint8_t * puc;
++
++    /* Iterate through the list until a block is found that has a higher address
++     * than the block being inserted. */
++    for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock )
++    {
++        /* Nothing to do here, just iterate to the right position. */
++    }
++
++    /* Do the block being inserted, and the block it is being inserted after
++     * make a contiguous block of memory? */
++    puc = ( uint8_t * ) pxIterator;
++
++    if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
++    {
++        pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
++        pxBlockToInsert = pxIterator;
++    }
++
++    /* Do the block being inserted, and the block it is being inserted before
++     * make a contiguous block of memory? */
++    puc = ( uint8_t * ) pxBlockToInsert;
++
++    if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock )
++    {
++        if( pxIterator->pxNextFreeBlock != pxEnd )
++        {
++            /* Form one big block from the two blocks. */
++            pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
++            pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
++        }
++        else
++        {
++            pxBlockToInsert->pxNextFreeBlock = pxEnd;
++        }
++    }
++    else
++    {
++        pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
++    }
++
++    /* If the block being inserted plugged a gap, and so was merged with the block
++     * before and the block after, then its pxNextFreeBlock pointer will have
++     * already been set, and should not be set here as that would make it point
++     * to itself. */
++    if( pxIterator != pxBlockToInsert )
++    {
++        pxIterator->pxNextFreeBlock = pxBlockToInsert;
++    }
++}
++/*-----------------------------------------------------------*/
++
++void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions )
++{
++    BlockLink_t * pxFirstFreeBlockInRegion = NULL, * pxPreviousFreeBlock;
++    size_t xAlignedHeap;
++    size_t xTotalRegionSize, xTotalHeapSize = 0;
++    BaseType_t xDefinedRegions = 0;
++    size_t xAddress;
++    const HeapRegion_t * pxHeapRegion;
++
++    /* Can only call once! */
++    configASSERT( pxEnd == NULL );
++
++    pxHeapRegion = &( pxHeapRegions[ xDefinedRegions ] );
++
++    while( pxHeapRegion->xSizeInBytes > 0 )
++    {
++        xTotalRegionSize = pxHeapRegion->xSizeInBytes;
++
++        /* Ensure the heap region starts on a correctly aligned boundary. */
++        xAddress = ( size_t ) pxHeapRegion->pucStartAddress;
++
++        if( ( xAddress & portBYTE_ALIGNMENT_MASK ) != 0 )
++        {
++            xAddress += ( portBYTE_ALIGNMENT - 1 );
++            xAddress &= ~portBYTE_ALIGNMENT_MASK;
++
++            /* Adjust the size for the bytes lost to alignment. */
++            xTotalRegionSize -= xAddress - ( size_t ) pxHeapRegion->pucStartAddress;
++        }
++
++        xAlignedHeap = xAddress;
++
++        /* Set xStart if it has not already been set. */
++        if( xDefinedRegions == 0 )
++        {
++            /* xStart is used to hold a pointer to the first item in the list of
++             *  free blocks.  The void cast is used to prevent compiler warnings. */
++            xStart.pxNextFreeBlock = ( BlockLink_t * ) xAlignedHeap;
++            xStart.xBlockSize = ( size_t ) 0;
++        }
++        else
++        {
++            /* Should only get here if one region has already been added to the
++             * heap. */
++            configASSERT( pxEnd != NULL );
++
++            /* Check blocks are passed in with increasing start addresses. */
++            configASSERT( xAddress > ( size_t ) pxEnd );
++        }
++
++        /* Remember the location of the end marker in the previous region, if
++         * any. */
++        pxPreviousFreeBlock = pxEnd;
++
++        /* pxEnd is used to mark the end of the list of free blocks and is
++         * inserted at the end of the region space. */
++        xAddress = xAlignedHeap + xTotalRegionSize;
++        xAddress -= xHeapStructSize;
++        xAddress &= ~portBYTE_ALIGNMENT_MASK;
++        pxEnd = ( BlockLink_t * ) xAddress;
++        pxEnd->xBlockSize = 0;
++        pxEnd->pxNextFreeBlock = NULL;
++
++        /* To start with there is a single free block in this region that is
++         * sized to take up the entire heap region minus the space taken by the
++         * free block structure. */
++        pxFirstFreeBlockInRegion = ( BlockLink_t * ) xAlignedHeap;
++        pxFirstFreeBlockInRegion->xBlockSize = xAddress - ( size_t ) pxFirstFreeBlockInRegion;
++        pxFirstFreeBlockInRegion->pxNextFreeBlock = pxEnd;
++
++        /* If this is not the first region that makes up the entire heap space
++         * then link the previous region to this region. */
++        if( pxPreviousFreeBlock != NULL )
++        {
++            pxPreviousFreeBlock->pxNextFreeBlock = pxFirstFreeBlockInRegion;
++        }
++
++        xTotalHeapSize += pxFirstFreeBlockInRegion->xBlockSize;
++
++        /* Move onto the next HeapRegion_t structure. */
++        xDefinedRegions++;
++        pxHeapRegion = &( pxHeapRegions[ xDefinedRegions ] );
++    }
++
++    xMinimumEverFreeBytesRemaining = xTotalHeapSize;
++    xFreeBytesRemaining = xTotalHeapSize;
++
++    /* Check something was actually defined before it is accessed. */
++    configASSERT( xTotalHeapSize );
++
++    /* Work out the position of the top bit in a size_t variable. */
++    xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 );
++}
++/*-----------------------------------------------------------*/
++
++void vPortGetHeapStats( HeapStats_t * pxHeapStats )
++{
++    BlockLink_t * pxBlock;
++    size_t xBlocks = 0, xMaxSize = 0, xMinSize = portMAX_DELAY; /* portMAX_DELAY used as a portable way of getting the maximum value. */
++
++    vTaskSuspendAll();
++    {
++        pxBlock = xStart.pxNextFreeBlock;
++
++        /* pxBlock will be NULL if the heap has not been initialised.  The heap
++         * is initialised automatically when the first allocation is made. */
++        if( pxBlock != NULL )
++        {
++            do
++            {
++                /* Increment the number of blocks and record the largest block seen
++                 * so far. */
++                xBlocks++;
++
++                if( pxBlock->xBlockSize > xMaxSize )
++                {
++                    xMaxSize = pxBlock->xBlockSize;
++                }
++
++                /* Heap five will have a zero sized block at the end of each
++                 * region - the block is only used to link to the next heap
++                 * region, so it is not a real block. */
++                if( pxBlock->xBlockSize != 0 )
++                {
++                    if( pxBlock->xBlockSize < xMinSize )
++                    {
++                        xMinSize = pxBlock->xBlockSize;
++                    }
++                }
++
++                /* Move to the next block in the chain until the last block is
++                 * reached. */
++                pxBlock = pxBlock->pxNextFreeBlock;
++            } while( pxBlock != pxEnd );
++        }
++    }
++    ( void ) xTaskResumeAll();
++
++    pxHeapStats->xSizeOfLargestFreeBlockInBytes = xMaxSize;
++    pxHeapStats->xSizeOfSmallestFreeBlockInBytes = xMinSize;
++    pxHeapStats->xNumberOfFreeBlocks = xBlocks;
++
++    taskENTER_CRITICAL();
++    {
++        pxHeapStats->xAvailableHeapSpaceInBytes = xFreeBytesRemaining;
++        pxHeapStats->xNumberOfSuccessfulAllocations = xNumberOfSuccessfulAllocations;
++        pxHeapStats->xNumberOfSuccessfulFrees = xNumberOfSuccessfulFrees;
++        pxHeapStats->xMinimumEverFreeBytesRemaining = xMinimumEverFreeBytesRemaining;
++    }
++    taskEXIT_CRITICAL();
++}
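
To make the allocator above concrete, here is a minimal usage sketch (not part of the patch; the region arrays, sizes, and function name are placeholders) showing how an application hands two memory regions to vPortDefineHeapRegions() before the first allocation:

#include "FreeRTOS.h"
#include "portable.h"

/* Hypothetical backing storage; real regions would come from the linker script. */
static uint8_t ucRegion1[ 16 * 1024 ];
static uint8_t ucRegion2[ 32 * 1024 ];

void example_heap_init( void )
{
    /* Regions must be listed in order of increasing start address and
     * terminated with a NULL entry. */
    const HeapRegion_t xRegions[] =
    {
        { ucRegion1, sizeof( ucRegion1 ) },
        { ucRegion2, sizeof( ucRegion2 ) },
        { NULL,      0 }
    };

    vPortDefineHeapRegions( xRegions );    /* may only be called once */

    void * pv = pvPortMalloc( 128 );       /* heap is now ready for use */
    vPortFree( pv );
}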
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/port_common.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/port_common.c
+new file mode 100644
+index 0000000000..9d8159f588
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/port_common.c
+@@ -0,0 +1,203 @@
++/*
++ * SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
++ *
++ * SPDX-License-Identifier: Apache-2.0
++ */
++
++#include <string.h>
++#include "FreeRTOS.h"
++#include "task.h"
++#include "portmacro.h"
++#include "esp_system.h"
++#include "esp_heap_caps_init.h"
++#include "esp_int_wdt.h"
++#include "esp_task_wdt.h"
++#include "esp_task.h"
++#include "esp_private/crosscore_int.h"
++#include "esp_private/startup_internal.h"    /* Required by g_spiram_ok. [refactor-todo] for g_spiram_ok */
++#include "esp_log.h"
++#include "soc/soc_memory_types.h"
++#include "soc/dport_access.h"
++#include "sdkconfig.h"
++
++#if CONFIG_IDF_TARGET_ESP32
++#include "esp32/spiram.h"
++#elif CONFIG_IDF_TARGET_ESP32S2
++#include "esp32s2/spiram.h"
++#elif CONFIG_IDF_TARGET_ESP32S3
++#include "esp32s3/spiram.h"
++#elif CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32H2
++// SPIRAM is not supported on ESP32-C3
++#endif
++
++#if CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL
++static const char* TAG = "cpu_start";
++#endif
++
++/* Architecture-agnostic parts of the FreeRTOS ESP-IDF port layer can go here.
++ *
++ * The actual call flow will be to call esp_startup_start_app() in <ARCH>/port.c,
++ * which will then call esp_startup_start_app_common()
++ */
++
++// Duplicate of inaccessible xSchedulerRunning; needed at startup to avoid counting nesting
++volatile unsigned port_xSchedulerRunning[portNUM_PROCESSORS] = {0};
++
++// For now, running FreeRTOS on one core and bare-metal code (or another OS) on the other
++// is not supported. For now CONFIG_FREERTOS_UNICORE and CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
++// should mirror each other's values.
++//
++// And since this should be true, we can just check for CONFIG_FREERTOS_UNICORE.
++#if CONFIG_FREERTOS_UNICORE != CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
++    #error "FreeRTOS and system configuration mismatch regarding the use of multiple cores."
++#endif
++
++static void main_task(void* args);
++
++#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
++void esp_gdbstub_init(void);
++#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
++
++extern void app_main(void);
++
++void esp_startup_start_app_common(void)
++{
++#if CONFIG_ESP_INT_WDT
++    esp_int_wdt_init();
++    //Initialize the interrupt watch dog for CPU0.
++    esp_int_wdt_cpu_init();
++#endif
++
++    esp_crosscore_int_init();
++
++#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
++    esp_gdbstub_init();
++#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
++
++#ifdef CONFIG_IDF_RTOS_RTTHREAD
++    app_main();
++#else
++    portBASE_TYPE res = xTaskCreatePinnedToCore(&main_task, "main",
++                                                ESP_TASK_MAIN_STACK, NULL,
++                                                ESP_TASK_MAIN_PRIO, NULL, ESP_TASK_MAIN_CORE);
++    assert(res == pdTRUE);
++    (void)res;
++#endif
++
++}
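
The CONFIG_IDF_RTOS_RTTHREAD branch above skips creation of the FreeRTOS "main" task and calls app_main() directly; presumably the RT-Thread build enables that option through its sdkconfig, along the lines of:

CONFIG_IDF_RTOS_RTTHREAD=y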
++
++static void main_task(void* args)
++{
++#if !CONFIG_FREERTOS_UNICORE
++    // Wait for FreeRTOS initialization to finish on APP CPU, before replacing its startup stack
++    while (port_xSchedulerRunning[1] == 0) {
++        ;
++    }
++#endif
++
++    // [refactor-todo] check if there is a way to move the following block to esp_system startup
++    heap_caps_enable_nonos_stack_heaps();
++
++    // Now we have startup stack RAM available for heap, enable any DMA pool memory
++#if CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL
++    if (g_spiram_ok) {
++        esp_err_t r = esp_spiram_reserve_dma_pool(CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL);
++        if (r != ESP_OK) {
++            ESP_EARLY_LOGE(TAG, "Could not reserve internal/DMA pool (error 0x%x)", r);
++            abort();
++        }
++    }
++#endif
++
++    //Initialize task wdt if configured to do so
++#ifdef CONFIG_ESP_TASK_WDT_PANIC
++    ESP_ERROR_CHECK(esp_task_wdt_init(CONFIG_ESP_TASK_WDT_TIMEOUT_S, true));
++#elif CONFIG_ESP_TASK_WDT
++    ESP_ERROR_CHECK(esp_task_wdt_init(CONFIG_ESP_TASK_WDT_TIMEOUT_S, false));
++#endif
++
++    //Add IDLE 0 to task wdt
++#ifdef CONFIG_ESP_TASK_WDT_CHECK_IDLE_TASK_CPU0
++    TaskHandle_t idle_0 = xTaskGetIdleTaskHandleForCPU(0);
++    if(idle_0 != NULL){
++        ESP_ERROR_CHECK(esp_task_wdt_add(idle_0));
++    }
++#endif
++    //Add IDLE 1 to task wdt
++#ifdef CONFIG_ESP_TASK_WDT_CHECK_IDLE_TASK_CPU1
++    TaskHandle_t idle_1 = xTaskGetIdleTaskHandleForCPU(1);
++    if(idle_1 != NULL){
++        ESP_ERROR_CHECK(esp_task_wdt_add(idle_1));
++    }
++#endif
++
++    app_main();
++    vTaskDelete(NULL);
++}
++
++// -------------------- Heap Related -----------------------
++
++bool xPortCheckValidTCBMem(const void *ptr)
++{
++    return esp_ptr_internal(ptr) && esp_ptr_byte_accessible(ptr);
++}
++
++bool xPortcheckValidStackMem(const void *ptr)
++{
++#ifdef CONFIG_SPIRAM_ALLOW_STACK_EXTERNAL_MEMORY
++    return esp_ptr_byte_accessible(ptr);
++#else
++    return esp_ptr_internal(ptr) && esp_ptr_byte_accessible(ptr);
++#endif
++}
++
++// ------------- FreeRTOS Static Allocation ----------------
++
++/*
++This function is required by FreeRTOS when configSUPPORT_STATIC_ALLOCATION is
++enabled and is used by FreeRTOS to obtain memory for its IDLE tasks.
++
++Like the pvPortMallocTcbMem() and pvPortMallocStackMem() macros, TCB and stack
++memory MUST be placed in internal RAM.
++*/
++void vApplicationGetIdleTaskMemory(StaticTask_t **ppxIdleTaskTCBBuffer,
++                                   StackType_t **ppxIdleTaskStackBuffer,
++                                   uint32_t *pulIdleTaskStackSize )
++{
++    StaticTask_t *pxTCBBufferTemp;
++    StackType_t *pxStackBufferTemp;
++    //Allocate TCB and stack buffer in internal memory
++    pxTCBBufferTemp = pvPortMallocTcbMem(sizeof(StaticTask_t));
++    pxStackBufferTemp = pvPortMallocStackMem(configIDLE_TASK_STACK_SIZE);
++    assert(pxTCBBufferTemp != NULL);
++    assert(pxStackBufferTemp != NULL);
++    //Write back pointers
++    *ppxIdleTaskTCBBuffer = pxTCBBufferTemp;
++    *ppxIdleTaskStackBuffer = pxStackBufferTemp;
++    *pulIdleTaskStackSize = configIDLE_TASK_STACK_SIZE;
++}
++
++/*
++This function is required by FreeRTOS when configSUPPORT_STATIC_ALLOCATION is
++enabled and is used by the FreeRTOS Timer to obtain memory for its daemon task.
++
++Like the pvPortMallocTcbMem() and pvPortMallocStackMem() macros, TCB and stack
++memory MUST be placed in internal RAM.
++*/
++void vApplicationGetTimerTaskMemory(StaticTask_t **ppxTimerTaskTCBBuffer,
++                                    StackType_t **ppxTimerTaskStackBuffer,
++                                    uint32_t *pulTimerTaskStackSize )
++{
++    StaticTask_t *pxTCBBufferTemp;
++    StackType_t *pxStackBufferTemp;
++    //Allocate TCB and stack buffer in internal memory
++    pxTCBBufferTemp = pvPortMallocTcbMem(sizeof(StaticTask_t));
++    pxStackBufferTemp = pvPortMallocStackMem(configTIMER_TASK_STACK_DEPTH);
++    assert(pxTCBBufferTemp != NULL);
++    assert(pxStackBufferTemp != NULL);
++    //Write back pointers
++    *ppxTimerTaskTCBBuffer = pxTCBBufferTemp;
++    *ppxTimerTaskStackBuffer = pxStackBufferTemp;
++    *pulTimerTaskStackSize = configTIMER_TASK_STACK_DEPTH;
++}
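
The same static-allocation pattern is available to application tasks; a hedged sketch follows (task name, priority, and stack depth are placeholders, and it assumes the wrapper exposes xTaskCreateStatic() like stock FreeRTOS):

#include "FreeRTOS.h"
#include "task.h"

#define EXAMPLE_STACK_DEPTH  2048   /* in StackType_t words */

static StaticTask_t xExampleTcb;                           /* caller-supplied TCB   */
static StackType_t  xExampleStack[ EXAMPLE_STACK_DEPTH ];  /* caller-supplied stack */

static void example_task( void * pvParameters )
{
    ( void ) pvParameters;
    for( ;; )
    {
        vTaskDelay( pdMS_TO_TICKS( 1000 ) );
    }
}

void example_create_static_task( void )
{
    TaskHandle_t xHandle = xTaskCreateStatic( example_task, "example",
                                              EXAMPLE_STACK_DEPTH, NULL,
                                              5, xExampleStack, &xExampleTcb );
    configASSERT( xHandle != NULL );
}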
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/FreeRTOSConfig_arch.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/FreeRTOSConfig_arch.h
+new file mode 100644
+index 0000000000..a7d534343f
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/FreeRTOSConfig_arch.h
+@@ -0,0 +1,105 @@
++/*
++    FreeRTOS V10 - Copyright (C) 2021 Real Time Engineers Ltd.
++    All rights reserved
++
++    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
++
++    This file is part of the FreeRTOS distribution.
++
++    FreeRTOS is free software; you can redistribute it and/or modify it under
++    the terms of the GNU General Public License (version 2) as published by the
++    Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
++
++	***************************************************************************
++    >>!   NOTE: The modification to the GPL is included to allow you to     !<<
++    >>!   distribute a combined work that includes FreeRTOS without being   !<<
++    >>!   obliged to provide the source code for proprietary components     !<<
++    >>!   outside of the FreeRTOS kernel.                                   !<<
++	***************************************************************************
++
++    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
++    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++    FOR A PARTICULAR PURPOSE.  Full license text is available on the following
++    link: http://www.freertos.org/a00114.html
++
++    ***************************************************************************
++     *                                                                       *
++     *    FreeRTOS provides completely free yet professionally developed,    *
++     *    robust, strictly quality controlled, supported, and cross          *
++     *    platform software that is more than just the market leader, it     *
++     *    is the industry's de facto standard.                               *
++     *                                                                       *
++     *    Help yourself get started quickly while simultaneously helping     *
++     *    to support the FreeRTOS project by purchasing a FreeRTOS           *
++     *    tutorial book, reference manual, or both:                          *
++     *    http://www.FreeRTOS.org/Documentation                              *
++     *                                                                       *
++    ***************************************************************************
++
++    http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading
++	the FAQ page "My application does not run, what could be wrong?".  Have you
++	defined configASSERT()?
++
++	http://www.FreeRTOS.org/support - In return for receiving this top quality
++	embedded software for free we request you assist our global community by
++	participating in the support forum.
++
++	http://www.FreeRTOS.org/training - Investing in training allows your team to
++	be as productive as possible as early as possible.  Now you can receive
++	FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
++	Ltd, and the world's leading authority on the world's leading RTOS.
++
++    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
++    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
++    compatible FAT file system, and our tiny thread aware UDP/IP stack.
++
++    http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
++    Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
++
++    http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
++    Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
++    licenses offer ticketed support, indemnification and commercial middleware.
++
++    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
++    engineered and independently SIL3 certified version for use in safety and
++    mission critical applications that require provable dependability.
++
++    1 tab == 4 spaces!
++*/
++
++#ifndef FREERTOS_CONFIG_RISCV_H
++#define FREERTOS_CONFIG_RISCV_H
++
++// This file is included in the common FreeRTOSConfig.h.
++
++#include "sdkconfig.h"
++
++#define configUSE_PORT_OPTIMISED_TASK_SELECTION         0
++
++#ifndef __ASSEMBLER__
++#if CONFIG_IDF_TARGET_ESP32C3
++#include "esp32c3/rom/ets_sys.h"
++#elif CONFIG_IDF_TARGET_ESP32H2
++#include "esp32h2/rom/ets_sys.h"
++#endif
++#endif // __ASSEMBLER__
++
++/* The maximum interrupt priority from which FreeRTOS.org API functions can
++   be called.  Only API functions that end in ...FromISR() can be used within
++   interrupts. */
++#define configMAX_SYSCALL_INTERRUPT_PRIORITY            0
++
++#ifndef configISR_STACK_SIZE
++#define configISR_STACK_SIZE                            (CONFIG_FREERTOS_ISR_STACKSIZE)
++#endif
++
++#ifndef __ASSEMBLER__
++#if CONFIG_APPTRACE_SV_ENABLE
++extern int xPortSwitchFlag;
++#define os_task_switch_is_pended(_cpu_) (xPortSwitchFlag)
++#else
++#define os_task_switch_is_pended(_cpu_) (false)
++#endif
++#endif
++
++#endif // FREERTOS_CONFIG_RISCV_H
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/portmacro.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/portmacro.h
+new file mode 100644
+index 0000000000..ce683ac301
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/portmacro.h
+@@ -0,0 +1,107 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++
++#ifndef PORTMACRO_H
++    #define PORTMACRO_H
++
++    #ifdef __cplusplus
++        extern "C" {
++    #endif
++
++/*-----------------------------------------------------------
++ * Port specific definitions.
++ *
++ * The settings in this file configure FreeRTOS correctly for the
++ * given hardware and compiler.
++ *
++ * These settings should not be altered.
++ *-----------------------------------------------------------
++ */
++
++/* Type definitions. */
++    #define portCHAR          char
++    #define portFLOAT         float
++    #define portDOUBLE        double
++    #define portLONG          long
++    #define portSHORT         short
++    #define portSTACK_TYPE    rt_ubase_t
++    #define portBASE_TYPE     rt_base_t
++
++    typedef portSTACK_TYPE   StackType_t;
++    typedef rt_base_t        BaseType_t;
++    typedef rt_ubase_t       UBaseType_t;
++    typedef rt_tick_t        TickType_t;
++    #define portMAX_DELAY    ( TickType_t ) RT_TICK_MAX
++
++    struct rt_semaphore_wrapper
++    {
++        struct rt_semaphore sem;
++        rt_uint16_t max_value;
++    };
++
++/*-----------------------------------------------------------*/
++
++/* Architecture specifics. */
++    #define portBYTE_ALIGNMENT      RT_ALIGN_SIZE
++    #define portPOINTER_SIZE_TYPE   rt_size_t
++/*-----------------------------------------------------------*/
++
++/* Scheduler utilities. */
++    #define portYIELD()                 rt_thread_yield()
++    #define portYIELD_FROM_ISR( x )     rt_thread_yield()
++
++/*-----------------------------------------------------------*/
++
++/* Critical section management. */
++    extern void vPortEnterCritical( void );
++    extern void vPortExitCritical( void );
++    #define portSET_INTERRUPT_MASK_FROM_ISR()         rt_hw_interrupt_disable()
++    #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x )    rt_hw_interrupt_enable( x )
++    #define portDISABLE_INTERRUPTS()                  vPortEnterCritical()
++    #define portENABLE_INTERRUPTS()                   vPortExitCritical()
++    //#define portENTER_CRITICAL()                      vPortEnterCritical()
++    //#define portEXIT_CRITICAL()                       vPortExitCritical()
++
++/*-----------------------------------------------------------*/
++
++/* Use this macro to calculate the buffer size when allocating a queue statically
++ * To ensure the buffer can fit the desired number of messages
++ */
++    #define QUEUE_BUFFER_SIZE( uxQueueLength, uxItemSize )  ( ( RT_ALIGN( uxItemSize, RT_ALIGN_SIZE ) + sizeof( void * ) ) * uxQueueLength )
++
++    BaseType_t rt_err_to_freertos(rt_err_t rt_err);
++
++/* For ESP32 */
++    #include "portmacro_esp32c3.h"
++
++    #ifdef __cplusplus
++        }
++    #endif
++
++#endif /* PORTMACRO_H */
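
As a usage sketch for the QUEUE_BUFFER_SIZE() macro defined above (queue length, item type, and names are illustrative, and it assumes the wrapper provides xQueueCreateStatic() as stock FreeRTOS does):

#include "FreeRTOS.h"
#include "queue.h"

#define EXAMPLE_QUEUE_LEN  8

static StaticQueue_t xExampleQueueStruct;
static uint8_t ucExampleQueueStorage[ QUEUE_BUFFER_SIZE( EXAMPLE_QUEUE_LEN, sizeof( uint32_t ) ) ];

void example_create_queue( void )
{
    QueueHandle_t xQueue = xQueueCreateStatic( EXAMPLE_QUEUE_LEN,
                                               sizeof( uint32_t ),
                                               ucExampleQueueStorage,
                                               &xExampleQueueStruct );
    configASSERT( xQueue != NULL );
}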
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/portmacro_deprecated.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/portmacro_deprecated.h
+new file mode 100644
+index 0000000000..597d99c333
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/portmacro_deprecated.h
+@@ -0,0 +1,94 @@
++/*
++ * SPDX-FileCopyrightText: 2017-2021 Espressif Systems (Shanghai) CO LTD
++ *
++ * SPDX-License-Identifier: Apache-2.0
++ */
++
++/* ---------------------------------------------------- Deprecate ------------------------------------------------------
++ * - Macros or functions that should be deprecated in v5.0, then removed in the next major release
++ * - Kept so as not to cause a breaking change
++ * - Include this header at the end of portmacro.h
++ * ------------------------------------------------------------------------------------------------------------------ */
++
++/**
++ * @brief Disable interrupts in a nested manner
++ *
++ * Does the exact same thing as portSET_INTERRUPT_MASK_FROM_ISR()
++ *
++ * @deprecated This function is deprecated. Call portSET_INTERRUPT_MASK_FROM_ISR() instead
++ */
++static inline __attribute__((deprecated)) UBaseType_t portENTER_CRITICAL_NESTED(void) {
++    return portSET_INTERRUPT_MASK_FROM_ISR();
++}
++
++/**
++ * @brief Reenables interrupts in a nested manner
++ *
++ * Does the exact same thing as portCLEAR_INTERRUPT_MASK_FROM_ISR()
++ *
++ * @deprecated This function is deprecated. Call portCLEAR_INTERRUPT_MASK_FROM_ISR() instead
++ */
++static inline void __attribute__((deprecated)) portEXIT_CRITICAL_NESTED(UBaseType_t prev_level)
++{
++    portCLEAR_INTERRUPT_MASK_FROM_ISR(prev_level);
++}
++
++/* ---------------------- Spinlocks --------------------- */
++
++/**
++ * @brief Deprecated placeholder function to initialize a spinlock
++ *
++ * Currently does nothing.
++ *
++ * @deprecated This function is deprecated. If on multi-core, use spinlock_initialize() instead
++ * @param[in] mux Spinlock
++ */
++static inline void __attribute__((deprecated)) __attribute__((always_inline)) vPortCPUInitializeMutex(portMUX_TYPE *mux)
++{
++    (void)mux;
++}
++
++/**
++ * @brief Deprecated placeholder function to acquire a spinlock
++ *
++ * Currently does nothing.
++ *
++ * @deprecated This function is deprecated. If on multi-core, use spinlock_acquire() instead
++ * @param[in] mux Spinlock
++ */
++static inline void __attribute__((deprecated)) __attribute__((always_inline)) vPortCPUAcquireMutex(portMUX_TYPE *mux)
++{
++    (void)mux;
++}
++
++/**
++ * @brief Deprecated placeholder function to acquire a spinlock, but with a specified timeout
++ *
++ * Currently just returns true
++ *
++ * @deprecated This function is deprecated. If on multi-core, use spinlock_acquire() instead
++ * @note Does not have deprecated attribute due to usage in app_trace_util.c
++ * @param[in] mux Spinlock
++ * @param[in] timeout_cycles Timeout in number of CPU cycles
++ * @return true Always returns true
++ */
++static inline bool __attribute__((always_inline))  vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout_cycles)
++{
++    (void)mux;
++    (void)timeout_cycles;
++    return true;
++}
++
++/**
++ * @brief Deprecated placeholder function to release a spinlock
++ *
++ * Currently does nothing.
++ *
++ * @deprecated This function is deprecated. If on multi-core, use spinlock_release() instead
++ * @note Does not have deprecated attribute due to usage in app_trace_util.c
++ * @param[in] mux Spinlock
++ */
++static inline void __attribute__((always_inline)) vPortCPUReleaseMutex(portMUX_TYPE *mux)
++{
++    (void)mux;
++}
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/portmacro_esp32c3.h b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/portmacro_esp32c3.h
+new file mode 100644
+index 0000000000..88459bb6a2
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/include/freertos/portmacro_esp32c3.h
+@@ -0,0 +1,424 @@
++/*
++ * FreeRTOS Kernel V10.4.3
++ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * http://www.FreeRTOS.org
++ * http://aws.amazon.com/freertos
++ *
++ * 1 tab == 4 spaces!
++ */
++
++#ifndef PORTMACRO_ESP32C3_H
++#define PORTMACRO_ESP32C3_H
++
++#ifndef __ASSEMBLER__
++
++#include "sdkconfig.h"
++#include <stdint.h>
++#include <stdlib.h>
++#include <stdbool.h>
++#include <stdio.h>
++#include "soc/spinlock.h"
++#include "soc/interrupt_core0_reg.h"
++#include "soc/cpu.h"
++#include "esp_attr.h"
++#include "esp_rom_sys.h"
++#include "esp_timer.h"              /* required for FreeRTOS run time stats */
++#include "esp_heap_caps.h"
++#include "esp_system.h"             /* required by esp_get_...() functions in portable.h. [refactor-todo] Update portable.h */
++#include "esp_newlib.h"
++//#include "portbenchmark.h"
++
++/* [refactor-todo] These includes are not directly used in this file. They are kept into to prevent a breaking change. Remove these. */
++#include <limits.h>
++#ifdef CONFIG_LEGACY_INCLUDE_COMMON_HEADERS
++#include "soc/soc_memory_layout.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++
++/* --------------------------------------------------- Port Types ------------------------------------------------------
++ * - Port specific types.
++ * - The settings in this file configure FreeRTOS correctly for the given hardware and compiler.
++ * - These settings should not be altered.
++ * - The port types must come first as they are used further down in this file
++ * ------------------------------------------------------------------------------------------------------------------ */
++
++/* Task function macros as described on the FreeRTOS.org WEB site. */
++#define portTASK_FUNCTION_PROTO(vFunction, pvParameters) void vFunction(void *pvParameters)
++#define portTASK_FUNCTION(vFunction, pvParameters) void vFunction(void *pvParameters)
++
++// The interrupt module will mask interrupts whose priority is less than this threshold
++#define RVHAL_EXCM_LEVEL            4
++
++
++/* ----------------------------------------------- Port Configurations -------------------------------------------------
++ * - Configurations values supplied by each port
++ * - Required by FreeRTOS
++ * ------------------------------------------------------------------------------------------------------------------ */
++
++#define portCRITICAL_NESTING_IN_TCB     0
++#define portSTACK_GROWTH                (-1)
++#define portTICK_PERIOD_MS              ((TickType_t) (1000 / configTICK_RATE_HZ))
++#define portNOP() __asm volatile        (" nop ")
++
++
++
++/* ---------------------------------------------- Forward Declarations -------------------------------------------------
++ * - Forward declarations of all the port functions and macros need to implement the FreeRTOS porting interface
++ * - These must come before definition/declaration of the FreeRTOS porting interface
++ * ------------------------------------------------------------------------------------------------------------------ */
++
++// --------------------- Interrupts ------------------------
++
++/**
++ * @brief Checks if the current core is in an ISR context
++ *
++ * - ISR context consists of a low/mid priority ISR, or the time tick ISR
++ * - High priority ISRs aren't detected here, but they normally cannot call C code, so that should not be an issue anyway.
++ *
++ * @note [refactor-todo] Check if this should be inlined
++ * @return
++ *  - pdTRUE if in ISR
++ *  - pdFALSE otherwise
++ */
++BaseType_t xPortInIsrContext(void);
++
++/**
++ * @brief Check if in ISR context from High priority ISRs
++ *
++ * - Called from High priority ISR
++ * - Checks if the previous context (before high priority interrupt) was in ISR context (meaning low/med priority)
++ *
++ * @note [refactor-todo] Check if this should be inlined
++ * @return
++ *  - pdTRUE if the previous context was an ISR context
++ *  - pdFALSE otherwise
++ */
++BaseType_t xPortInterruptedFromISRContext(void);
++
++/* ---------------------- Spinlocks ------------------------
++ - Spinlocks added to match API with SMP FreeRTOS. Single core RISC-V does not need spin locks
++ - Because single core does not have a primitive spinlock data type, we have to implement one here
++ * @note [refactor-todo] Refactor critical section API so that this is no longer required
++ * ------------------------------------------------------ */
++
++/**
++ * @brief Spinlock object
++ * Owner:
++ *  - Set to 0 if uninitialized
++ *  - Set to portMUX_FREE_VAL when free
++ *  - Set to CORE_ID_REGVAL_PRO or CORE_ID_REGVAL_AP when locked
++ *  - Any other value indicates corruption
++ * Count:
++ *  - 0 if unlocked
++ *  - Recursive count if locked
++ *
++ * @note Not a true spinlock as single core RISC-V does not have atomic compare and set instruction
++ * @note Keep portMUX_INITIALIZER_UNLOCKED in sync with this struct
++ */
++typedef struct {
++    uint32_t owner;
++    uint32_t count;
++} portMUX_TYPE;
++/**< Spinlock initializer */
++#define portMUX_INITIALIZER_UNLOCKED {                      \
++            .owner = portMUX_FREE_VAL,                      \
++            .count = 0,                                     \
++        }
++#define portMUX_FREE_VAL                    SPINLOCK_FREE           /**< Spinlock is free. [refactor-todo] check if this is still required */
++#define portMUX_NO_TIMEOUT                  SPINLOCK_WAIT_FOREVER   /**< When passed for 'timeout_cycles', spin forever if necessary. [refactor-todo] check if this is still required */
++#define portMUX_TRY_LOCK                    SPINLOCK_NO_WAIT        /**< Try to acquire the spinlock a single time only. [refactor-todo] check if this is still required */
++#define portMUX_INITIALIZE(mux)    ({ \
++    (mux)->owner = portMUX_FREE_VAL; \
++    (mux)->count = 0; \
++})
++
++/**
++ * @brief Wrapper for atomic compare-and-set instruction
++ *
++ * @note Isn't a real atomic CAS.
++ * @note [refactor-todo] check if we still need this
++ * @note [refactor-todo] Check if this function should be renamed (due to void return type)
++ *
++ * @param[inout] addr Pointer to target address
++ * @param[in] compare Compare value
++ * @param[inout] set Pointer to set value
++ */
++static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set);
++
++/**
++ * @brief Wrapper for atomic compare-and-set instruction in external RAM
++ *
++ * @note Isn't a real atomic CAS.
++ * @note [refactor-todo] check if we still need this
++ * @note [refactor-todo] Check if this function should be renamed (due to void return type)
++ *
++ * @param[inout] addr Pointer to target address
++ * @param[in] compare Compare value
++ * @param[inout] set Pointer to set value
++ */
++static inline void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set);
++
++// ---------------------- Yielding -------------------------
++
++/**
++ * @brief Perform a context switch from a task
++ *
++ * @note [refactor-todo] The rest of ESP-IDF should call taskYield() instead
++ */
++#define vPortYield()    portYIELD()
++
++/**
++ * @brief Perform a context switch from an ISR
++ */
++#define vPortYieldFromISR() portYIELD_FROM_ISR(0)
++
++/**
++ * @brief Checks if the current core can yield
++ *
++ * - A core cannot yield if it is in an ISR or in a critical section
++ *
++ * @note [refactor-todo] See if this can be separated from port macro
++ * @note [refactor-todo] Check if this function should be renamed (due to bool return type)
++ * @return true Core can yield
++ * @return false Core cannot yield
++ */
++static inline bool IRAM_ATTR xPortCanYield(void);
++
++// ------------------- Hook Functions ----------------------
++
++extern void esp_vApplicationIdleHook(void);
++extern void esp_vApplicationTickHook(void);
++
++/**
++ * @brief Hook function called on entry to tickless idle
++ *
++ * - Implemented in pm_impl.c
++ *
++ * @param xExpectedIdleTime Expected idle time
++ */
++void vApplicationSleep(TickType_t xExpectedIdleTime);
++
++// ----------------------- System --------------------------
++
++/**
++ * @brief Get the tick rate per second
++ *
++ * @note [refactor-todo] make this inline
++ * @note [refactor-todo] Check if this function should be renamed (due to uint return type)
++ * @return uint32_t Tick rate in Hz
++ */
++uint32_t xPortGetTickRateHz(void);
++
++/**
++ * @brief Set a watchpoint to watch the last 32 bytes of the stack
++ *
++ * Callback to set a watchpoint on the end of the stack. Called every context switch to change the stack watchpoint
++ * around.
++ *
++ * @param pxStackStart Pointer to the start of the stack
++ */
++void vPortSetStackWatchpoint(void *pxStackStart);
++
++/**
++ * @brief Get the current core's ID
++ *
++ * @note Added to be compatible with SMP API
++ * @note [refactor-todo] IDF should call a FreeRTOS like macro instead of port function directly
++ * @return BaseType_t Core ID
++ */
++static inline BaseType_t IRAM_ATTR xPortGetCoreID(void)
++{
++    return (uint32_t) cpu_hal_get_core_id();
++}
++
++
++
++/* ------------------------------------------- FreeRTOS Porting Interface ----------------------------------------------
++ * - Contains all the mappings of the macros required by FreeRTOS
++ * - Most come after forward declare as porting macros map to declared functions
++ * - Maps to forward declared functions
++ * ------------------------------------------------------------------------------------------------------------------ */
++
++// ----------------------- Memory --------------------------
++
++/**
++ * @brief Task memory allocation macros
++ *
++ * @note Because the ROM routines don't necessarily handle a stack in external RAM correctly, we force the stack
++ * memory to always be internal.
++ * @note [refactor-todo] Update portable.h to match v10.4.3 to use new malloc prototypes
++ */
++#define portTcbMemoryCaps               (MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT)
++#define portStackMemoryCaps             (MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT)
++#define pvPortMallocTcbMem(size)        pvPortMalloc(size)
++#define pvPortMallocStackMem(size)      pvPortMalloc(size)
++
++// ------------------ Critical Sections --------------------
++#define portENTER_CRITICAL(mux)                 {(void)mux;  vPortEnterCritical();}
++#define portEXIT_CRITICAL(mux)                  {(void)mux;  vPortExitCritical();}
++#define portTRY_ENTER_CRITICAL(mux, timeout)    ({  \
++    (void)mux; (void)timeout;                       \
++    vPortEnterCritical();                           \
++    BaseType_t ret = pdPASS;                        \
++    ret;                                            \
++})
++//In single-core RISC-V, we can use the same critical section API
++#define portENTER_CRITICAL_ISR(mux)                 portENTER_CRITICAL(mux)
++#define portEXIT_CRITICAL_ISR(mux)                  portEXIT_CRITICAL(mux)
++#define portTRY_ENTER_CRITICAL_ISR(mux, timeout)    portTRY_ENTER_CRITICAL(mux, timeout)
++
++/* [refactor-todo] on RISC-V, both ISR and non-ISR cases result in the same call. We can redefine this macro */
++#define portENTER_CRITICAL_SAFE(mux)    ({  \
++    if (xPortInIsrContext()) {              \
++        portENTER_CRITICAL_ISR(mux);        \
++    } else {                                \
++        portENTER_CRITICAL(mux);            \
++    }                                       \
++})
++#define portEXIT_CRITICAL_SAFE(mux)     ({  \
++    if (xPortInIsrContext()) {              \
++        portEXIT_CRITICAL_ISR(mux);         \
++    } else {                                \
++        portEXIT_CRITICAL(mux);             \
++    }                                       \
++})
++#define portTRY_ENTER_CRITICAL_SAFE(mux, timeout)   portENTER_CRITICAL_SAFE(mux, timeout)
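
A short sketch of typical use of these critical-section macros (the spinlock and counter names are placeholders); on this single-core port the mux argument is kept only for API compatibility and is ignored:

#include "FreeRTOS.h"

static portMUX_TYPE s_example_lock = portMUX_INITIALIZER_UNLOCKED;
static uint32_t s_shared_counter;

void example_increment_from_task( void )
{
    portENTER_CRITICAL( &s_example_lock );        /* maps to vPortEnterCritical() */
    s_shared_counter++;
    portEXIT_CRITICAL( &s_example_lock );
}

void example_increment_from_anywhere( void )
{
    portENTER_CRITICAL_SAFE( &s_example_lock );   /* picks the ISR or task variant */
    s_shared_counter++;
    portEXIT_CRITICAL_SAFE( &s_example_lock );
}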
++
++// ---------------------- Yielding -------------------------
++
++#define portEND_SWITCHING_ISR(xSwitchRequired) if(xSwitchRequired) vPortYield()
++/* Yielding within an API call (when interrupts are off) means the yield should be delayed
++   until interrupts are re-enabled.
++   To do this, we use the "cross-core" interrupt as a trigger to yield on this core when interrupts are re-enabled. This
++   is the same interrupt & code path which is used to trigger a yield between CPUs, although in this case the yield is
++   happening on the same CPU.
++*/
++#define portYIELD_WITHIN_API() portYIELD()
++
++// ------------------- Hook Functions ----------------------
++
++#ifndef CONFIG_FREERTOS_LEGACY_HOOKS
++#define vApplicationIdleHook    esp_vApplicationIdleHook
++#define vApplicationTickHook    esp_vApplicationTickHook
++#endif /* !CONFIG_FREERTOS_LEGACY_HOOKS */
++#define portSUPPRESS_TICKS_AND_SLEEP(idleTime) vApplicationSleep(idleTime)
++
++// ------------------- Run Time Stats ----------------------
++
++#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS()
++#define portGET_RUN_TIME_COUNTER_VALUE() 0
++#ifdef CONFIG_FREERTOS_RUN_TIME_STATS_USING_ESP_TIMER
++/* Coarse resolution time (us) */
++#define portALT_GET_RUN_TIME_COUNTER_VALUE(x)    do {x = (uint32_t)esp_timer_get_time();} while(0)
++#endif
++
++
++
++/* --------------------------------------------- Inline Implementations ------------------------------------------------
++ * - Implementation of inline functions of the forward declares
++ * - Should come after forward declare and FreeRTOS Porting interface, as implementation may use both.
++ * - For implementation of non-inlined functions, see port.c, port_common.c, or other assembly files
++ * ------------------------------------------------------------------------------------------------------------------ */
++
++// --------------------- Interrupts ------------------------
++
++
++
++// ---------------------- Spinlocks ------------------------
++
++static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
++{
++    compare_and_set_native(addr, compare, set);
++}
++
++static inline void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
++{
++#if defined(CONFIG_SPIRAM)
++    compare_and_set_extram(addr, compare, set);
++#endif
++}
++
++// ---------------------- Yielding -------------------------
++
++static inline bool IRAM_ATTR xPortCanYield(void)
++{
++    uint32_t threshold = REG_READ(INTERRUPT_CORE0_CPU_INT_THRESH_REG);
++    /* When entering a critical section, FreeRTOS raises the threshold to RVHAL_EXCM_LEVEL,
++     * and restores it to 1 when the critical section is exited, so threshold <= 1
++     * means we are not in a critical section
++     */
++    return (threshold <= 1);
++}
++
++
++
++/* ------------------------------------------------------ Misc ---------------------------------------------------------
++ * - Miscellaneous porting macros
++ * - These are not part of the FreeRTOS porting interface, but are used by other FreeRTOS-dependent components
++ * ------------------------------------------------------------------------------------------------------------------ */
++
++// -------------------- Heap Related -----------------------
++
++/**
++ * @brief Checks if a given piece of memory can be used to store a task's TCB
++ *
++ * - Defined in port_common.c
++ *
++ * @param ptr Pointer to memory
++ * @return true Memory can be used to store a TCB
++ * @return false Otherwise
++ */
++bool xPortCheckValidTCBMem(const void *ptr);
++
++/**
++ * @brief Checks if a given piece of memory can be used to store a task's stack
++ *
++ * - Defined in port_common.c
++ *
++ * @param ptr Pointer to memory
++ * @return true Memory can be used to store a task stack
++ * @return false Otherwise
++ */
++bool xPortcheckValidStackMem(const void *ptr);
++
++#define portVALID_TCB_MEM(ptr) xPortCheckValidTCBMem(ptr)
++#define portVALID_STACK_MEM(ptr) xPortcheckValidStackMem(ptr)
++
++
++
++/* ---------------------------------------------------- Deprecate ------------------------------------------------------
++ * - Pull in header containing deprecated macros here
++ * ------------------------------------------------------------------------------------------------------------------ */
++
++#include "portmacro_deprecated.h"
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif //__ASSEMBLER__
++
++#endif /* PORTMACRO_H */
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/port.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/port.c
+new file mode 100644
+index 0000000000..9d8195f832
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/port.c
+@@ -0,0 +1,44 @@
++#include <FreeRTOS.h>
++
++static rt_base_t level = 0;
++static rt_base_t critical_nesting = 0;
++
++void vPortEnterCritical( void )
++{
++    if ( critical_nesting == 0 )
++    {
++        level = rt_hw_interrupt_disable();
++    }
++    critical_nesting += 1;
++}
++
++void vPortExitCritical( void )
++{
++    critical_nesting -= 1;
++    if ( critical_nesting == 0 )
++    {
++        rt_hw_interrupt_enable( level );
++    }
++}
++
++void vPortEndScheduler( void )
++{
++    /* Not implemented in ports where there is nothing to return to. */
++}
++
++BaseType_t rt_err_to_freertos(rt_err_t rt_err)
++{
++    switch(-rt_err)
++    {
++        case RT_EOK:
++            return pdPASS;
++        case RT_ENOMEM:
++            return errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
++        case RT_EFULL:
++            return errQUEUE_FULL;
++        case RT_EEMPTY:
++            return errQUEUE_EMPTY;
++        default:
++            return pdFAIL;
++    }
++}
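
For illustration, a sketch of how wrapper code might use rt_err_to_freertos() to report an RT-Thread result as a FreeRTOS status (the message-queue send is just one plausible call site; mq and the item are placeholders):

#include <rtthread.h>
#include <FreeRTOS.h>

BaseType_t example_send( rt_mq_t mq, const void * pvItem, rt_size_t xSize )
{
    /* Forward the RT-Thread error code to the caller as a FreeRTOS status
     * (pdPASS, errQUEUE_FULL, ...). */
    rt_err_t err = rt_mq_send( mq, ( void * ) pvItem, xSize );
    return rt_err_to_freertos( err );
}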
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/port_esp32c3.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/port_esp32c3.c
+new file mode 100644
+index 0000000000..64fbe9b15b
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/port/rt-thread/port_esp32c3.c
+@@ -0,0 +1,197 @@
++/*
++    FreeRTOS V8.2.3 - Copyright (C) 2015 Real Time Engineers Ltd.
++    All rights reserved
++
++    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
++
++    This file is part of the FreeRTOS distribution and was contributed
++    to the project by Technolution B.V. (www.technolution.nl,
++    freertos-riscv@technolution.eu) under the terms of the FreeRTOS
++    contributors license.
++
++    FreeRTOS is free software; you can redistribute it and/or modify it under
++    the terms of the GNU General Public License (version 2) as published by the
++    Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.
++
++    ***************************************************************************
++    >>!   NOTE: The modification to the GPL is included to allow you to     !<<
++    >>!   distribute a combined work that includes FreeRTOS without being   !<<
++    >>!   obliged to provide the source code for proprietary components     !<<
++    >>!   outside of the FreeRTOS kernel.                                   !<<
++    ***************************************************************************
++
++    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
++    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++    FOR A PARTICULAR PURPOSE.  Full license text is available on the following
++    link: http://www.freertos.org/a00114.html
++
++    ***************************************************************************
++     *                                                                       *
++     *    FreeRTOS provides completely free yet professionally developed,    *
++     *    robust, strictly quality controlled, supported, and cross          *
++     *    platform software that is more than just the market leader, it     *
++     *    is the industry's de facto standard.                               *
++     *                                                                       *
++     *    Help yourself get started quickly while simultaneously helping     *
++     *    to support the FreeRTOS project by purchasing a FreeRTOS           *
++     *    tutorial book, reference manual, or both:                          *
++     *    http://www.FreeRTOS.org/Documentation                              *
++     *                                                                       *
++    ***************************************************************************
++
++    http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading
++    the FAQ page "My application does not run, what could be wrong?".  Have you
++    defined configASSERT()?
++
++    http://www.FreeRTOS.org/support - In return for receiving this top quality
++    embedded software for free we request you assist our global community by
++    participating in the support forum.
++
++    http://www.FreeRTOS.org/training - Investing in training allows your team to
++    be as productive as possible as early as possible.  Now you can receive
++    FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
++    Ltd, and the world's leading authority on the world's leading RTOS.
++
++    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
++    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
++    compatible FAT file system, and our tiny thread aware UDP/IP stack.
++
++    http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
++    Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
++
++    http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
++    Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
++    licenses offer ticketed support, indemnification and commercial middleware.
++
++    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
++    engineered and independently SIL3 certified version for use in safety and
++    mission critical applications that require provable dependability.
++
++    1 tab == 4 spaces!
++*/
++
++/*-----------------------------------------------------------------------
++ * Implementation of functions defined in portable.h for the RISC-V port.
++ *----------------------------------------------------------------------*/
++
++#include "sdkconfig.h"
++#include <string.h>
++#include "soc/soc_caps.h"
++#include "soc/periph_defs.h"
++#include "soc/system_reg.h"
++#include "hal/systimer_hal.h"
++#include "hal/systimer_ll.h"
++#include "riscv/rvruntime-frames.h"
++#include "riscv/riscv_interrupts.h"
++#include "riscv/interrupt.h"
++#include "esp_private/crosscore_int.h"
++#include "esp_private/pm_trace.h"
++#include "esp_attr.h"
++#include "esp_system.h"
++#include "esp_intr_alloc.h"
++#include "esp_debug_helpers.h"
++#include "esp_log.h"
++#include "FreeRTOS.h"       /* This pulls in portmacro.h */
++#include "task.h"
++#include "portmacro.h"
++//#include "port_systick.h"
++
++
++
++/* ---------------------------------------------------- Variables ------------------------------------------------------
++ *
++ * ------------------------------------------------------------------------------------------------------------------ */
++
++static const char *TAG = "cpu_start"; // [refactor-todo]: might be appropriate to change in the future, but
++
++/**
++ * @brief A variable is used to keep track of the critical section nesting.
++ * @note This variable has to be stored as part of the task context and must be initialized to a non zero value
++ *       to ensure interrupts don't inadvertently become unmasked before the scheduler starts.
++ *       As it is stored as part of the task context it will automatically be set to 0 when the first task is started.
++ */
++static UBaseType_t uxCriticalNesting = 0;
++static UBaseType_t uxSavedInterruptState = 0;
++BaseType_t uxSchedulerRunning = 0;
++UBaseType_t uxInterruptNesting = 0;
++BaseType_t xPortSwitchFlag = 0;
++__attribute__((aligned(16))) static StackType_t xIsrStack[configISR_STACK_SIZE];
++StackType_t *xIsrStackTop = &xIsrStack[0] + (configISR_STACK_SIZE & (~((portPOINTER_SIZE_TYPE)portBYTE_ALIGNMENT_MASK)));
++
++
++
++/* ---------------------------------------------- Port Implementations -------------------------------------------------
++ *
++ * ------------------------------------------------------------------------------------------------------------------ */
++
++// --------------------- Interrupts ------------------------
++
++BaseType_t xPortInIsrContext(void)
++{
++    return (BaseType_t)rt_interrupt_get_nest();
++}
++
++BaseType_t IRAM_ATTR xPortInterruptedFromISRContext(void)
++{
++    /* For single core, this can be the same as xPortInIsrContext() because reading it is atomic */
++    return (BaseType_t)rt_interrupt_get_nest();
++}
++
++// ---------------------- Spinlocks ------------------------
++
++
++// ---------------------- Yielding -------------------------
++
++
++// ------------------- Hook Functions ----------------------
++
++void __attribute__((weak)) vApplicationStackOverflowHook(TaskHandle_t xTask, char *pcTaskName)
++{
++#define ERR_STR1 "***ERROR*** A stack overflow in task "
++#define ERR_STR2 " has been detected."
++    const char *str[] = {ERR_STR1, pcTaskName, ERR_STR2};
++
++    char buf[sizeof(ERR_STR1) + CONFIG_FREERTOS_MAX_TASK_NAME_LEN + sizeof(ERR_STR2) + 1 /* null char */] = {0};
++
++    char *dest = buf;
++    for (int i = 0; i < sizeof(str) / sizeof(str[0]); i++) {
++        dest = strcat(dest, str[i]);
++    }
++    esp_system_abort(buf);
++}
++
++// ----------------------- System --------------------------
++
++uint32_t xPortGetTickRateHz(void)
++{
++    return (uint32_t)configTICK_RATE_HZ;
++}
++
++#define STACK_WATCH_AREA_SIZE 32
++#define STACK_WATCH_POINT_NUMBER (SOC_CPU_WATCHPOINTS_NUM - 1)
++
++void vPortSetStackWatchpoint(void *pxStackStart)
++{
++    uint32_t addr = (uint32_t)pxStackStart;
++    addr = (addr + (STACK_WATCH_AREA_SIZE - 1)) & (~(STACK_WATCH_AREA_SIZE - 1));
++    esp_cpu_set_watchpoint(STACK_WATCH_POINT_NUMBER, (char *)addr, STACK_WATCH_AREA_SIZE, ESP_WATCHPOINT_STORE);
++}
++
++
++
++/* ---------------------------------------------- Misc Implementations -------------------------------------------------
++ *
++ * ------------------------------------------------------------------------------------------------------------------ */
++
++// --------------------- App Start-up ----------------------
++
++/* [refactor-todo]: See if we can include this through a header */
++extern void esp_startup_start_app_common(void);
++
++void esp_startup_start_app(void)
++{
++    esp_startup_start_app_common();
++
++    ESP_LOGI(TAG, "Starting scheduler.");
++    vTaskStartScheduler();
++}
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/queue.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/queue.c
+new file mode 100644
+index 0000000000..8965e193d0
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/queue.c
+@@ -0,0 +1,787 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++#include <stdlib.h>
++#include <string.h>
++
++#include "FreeRTOS.h"
++#include "queue.h"
++
++/* Semaphores do not actually store or copy data, so have an item size of
++ * zero. */
++#define queueSEMAPHORE_QUEUE_ITEM_LENGTH    ( ( UBaseType_t ) 0 )
++#define queueMUTEX_GIVE_BLOCK_TIME          ( ( TickType_t ) 0U )
++
++typedef struct QueueDefinition
++{
++    struct rt_ipc_object *rt_ipc;
++} xQUEUE;
++typedef xQUEUE Queue_t;
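++
++/*
++ * Note: a FreeRTOS QueueHandle_t is reduced here to a single RT-Thread IPC
++ * object: mutexes map to rt_mutex, binary/counting semaphores to rt_semaphore
++ * (with the FreeRTOS maximum count kept in a wrapper, since RT-Thread has no
++ * per-semaphore ceiling), and plain queues to rt_messagequeue. Each call reads
++ * the object type back with rt_object_get_type() to pick the right primitive.
++ */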
++
++static volatile rt_uint8_t mutex_index = 0;
++static volatile rt_uint8_t sem_index = 0;
++static volatile rt_uint8_t queue_index = 0;
++
++/*-----------------------------------------------------------*/
++
++BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
++                               BaseType_t xNewQueue )
++{
++    Queue_t * const pxQueue = xQueue;
++    struct rt_ipc_object *pipc;
++    rt_uint8_t type;
++
++    configASSERT( pxQueue );
++
++    pipc = pxQueue->rt_ipc;
++    RT_ASSERT( pipc != RT_NULL );
++    type = rt_object_get_type( &pipc->parent );
++
++    if ( type == RT_Object_Class_Semaphore )
++    {
++        rt_sem_control( ( rt_sem_t ) pipc, RT_IPC_CMD_RESET, ( void * ) 0);
++    }
++    else if ( type == RT_Object_Class_MessageQueue )
++    {
++        rt_mq_control( ( rt_mq_t ) pipc, RT_IPC_CMD_RESET, RT_NULL );
++    }
++
++    return pdPASS;
++}
++/*-----------------------------------------------------------*/
++
++#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++
++    QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
++                                             const UBaseType_t uxItemSize,
++                                             uint8_t * pucQueueStorage,
++                                             StaticQueue_t * pxStaticQueue,
++                                             const uint8_t ucQueueType )
++    {
++        Queue_t * pxNewQueue = NULL;
++        char name[RT_NAME_MAX] = {0};
++
++        /* The StaticQueue_t structure and the queue storage area must be
++         * supplied. */
++        configASSERT( pxStaticQueue );
++
++        if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
++            ( pxStaticQueue != NULL ) &&
++
++            /* A queue storage area should be provided if the item size is not 0, and
++             * should not be provided if the item size is 0. */
++            ( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) ) &&
++            ( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) ) )
++        {
++            if ( ucQueueType == queueQUEUE_TYPE_RECURSIVE_MUTEX || ucQueueType == queueQUEUE_TYPE_MUTEX )
++            {
++                rt_snprintf( name, RT_NAME_MAX, "mutex%02d", mutex_index++ );
++                rt_mutex_init( ( rt_mutex_t ) &( ( StaticSemaphore_t * ) pxStaticQueue )->ipc_obj.mutex, name, RT_IPC_FLAG_PRIO );
++            }
++            else if ( ucQueueType == queueQUEUE_TYPE_BINARY_SEMAPHORE || ucQueueType == queueQUEUE_TYPE_COUNTING_SEMAPHORE )
++            {
++                rt_snprintf( name, RT_NAME_MAX, "sem%02d", sem_index++ );
++                rt_sem_init( ( rt_sem_t ) &( ( StaticSemaphore_t * ) pxStaticQueue )->ipc_obj.semaphore, name, 0, RT_IPC_FLAG_PRIO );
++                ( ( StaticSemaphore_t * ) pxStaticQueue )->ipc_obj.semaphore.max_value = uxQueueLength;
++            }
++            else if ( ucQueueType == queueQUEUE_TYPE_BASE )
++            {
++                rt_snprintf( name, RT_NAME_MAX, "queue%02d", queue_index++ );
++                rt_mq_init( &( pxStaticQueue->ipc_obj ), name, pucQueueStorage, uxItemSize, QUEUE_BUFFER_SIZE( uxQueueLength, uxItemSize ), RT_IPC_FLAG_PRIO );
++            }
++            else
++            {
++                return pxNewQueue;
++            }
++            pxStaticQueue->rt_ipc = ( struct rt_ipc_object * ) &pxStaticQueue->ipc_obj;
++            pxNewQueue = ( QueueHandle_t ) pxStaticQueue;
++        }
++
++        return pxNewQueue;
++    }
++
++#endif /* configSUPPORT_STATIC_ALLOCATION */
++/*-----------------------------------------------------------*/
++
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++
++    QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
++                                       const UBaseType_t uxItemSize,
++                                       const uint8_t ucQueueType )
++    {
++        Queue_t * pxNewQueue = NULL;
++        char name[RT_NAME_MAX] = {0};
++        struct rt_ipc_object * pipc = RT_NULL;
++
++        if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
++            /* Check for multiplication overflow. */
++            ( ( SIZE_MAX / uxQueueLength ) >= uxItemSize ) &&
++            /* Check for addition overflow. */
++            ( ( SIZE_MAX - sizeof( Queue_t ) ) >= ( uxQueueLength * uxItemSize ) ) )
++        {
++            pxNewQueue = ( Queue_t * ) RT_KERNEL_MALLOC( sizeof( Queue_t ) );
++            if ( pxNewQueue == NULL )
++            {
++                return ( QueueHandle_t ) pxNewQueue;
++            }
++            if ( ucQueueType == queueQUEUE_TYPE_RECURSIVE_MUTEX || ucQueueType == queueQUEUE_TYPE_MUTEX )
++            {
++                rt_snprintf( name, RT_NAME_MAX, "mutex%02d", mutex_index++ );
++                pipc = ( struct rt_ipc_object * ) rt_mutex_create( name, RT_IPC_FLAG_PRIO );
++            }
++            else if ( ucQueueType == queueQUEUE_TYPE_BINARY_SEMAPHORE || ucQueueType == queueQUEUE_TYPE_COUNTING_SEMAPHORE )
++            {
++                rt_snprintf( name, RT_NAME_MAX, "sem%02d", sem_index++ );
++                pipc = ( struct rt_ipc_object * ) RT_KERNEL_MALLOC( sizeof( struct rt_semaphore_wrapper ) );
++                if ( pipc != RT_NULL )
++                {
++                    rt_sem_init( ( rt_sem_t ) pipc, name, 0, RT_IPC_FLAG_PRIO );
++                    ( ( struct rt_semaphore_wrapper * ) pipc )->max_value = uxQueueLength;
++                    /* Mark as dynamic so we can distinguish in vQueueDelete */
++                    pipc->parent.type &= ~RT_Object_Class_Static;
++                }
++            }
++            else if ( ucQueueType == queueQUEUE_TYPE_BASE )
++            {
++                rt_snprintf( name, RT_NAME_MAX, "queue%02d", queue_index++ );
++                pipc = ( struct rt_ipc_object * ) rt_mq_create( name, uxItemSize, uxQueueLength, RT_IPC_FLAG_PRIO);
++            }
++
++            if ( pipc == RT_NULL )
++            {
++                RT_KERNEL_FREE( pxNewQueue );
++                return NULL;
++            }
++            pxNewQueue->rt_ipc = pipc;
++        }
++
++        return ( QueueHandle_t ) pxNewQueue;
++    }
++
++#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
++/*-----------------------------------------------------------*/
++
++#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
++
++    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
++    {
++        QueueHandle_t xNewQueue;
++        const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;
++
++        xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
++        return xNewQueue;
++    }
++
++#endif /* configUSE_MUTEXES */
++/*-----------------------------------------------------------*/
++
++#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
++
++    QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType,
++                                           StaticQueue_t * pxStaticQueue )
++    {
++        QueueHandle_t xNewQueue;
++        const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;
++
++        xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
++
++        return xNewQueue;
++    }
++
++#endif /* configUSE_MUTEXES */
++/*-----------------------------------------------------------*/
++
++#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
++
++    TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore )
++    {
++        TaskHandle_t pxReturn;
++        struct rt_ipc_object *pipc;
++        rt_uint8_t type;
++        rt_base_t level;
++
++        configASSERT( xSemaphore );
++
++        pipc = xSemaphore->rt_ipc;
++        RT_ASSERT( pipc != RT_NULL );
++        type = rt_object_get_type( &pipc->parent );
++
++        if ( type == RT_Object_Class_Mutex )
++        {
++            level = rt_hw_interrupt_disable();
++            pxReturn = ( TaskHandle_t ) ( ( rt_mutex_t ) pipc )->owner;
++            rt_hw_interrupt_enable( level );
++        }
++        else
++        {
++            pxReturn = NULL;
++        }
++
++        return pxReturn;
++    }
++
++#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
++/*-----------------------------------------------------------*/
++
++#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
++
++    TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
++    {
++        return xQueueGetMutexHolder( xSemaphore );
++    }
++
++#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
++/*-----------------------------------------------------------*/
++
++#if ( configUSE_RECURSIVE_MUTEXES == 1 )
++
++    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
++    {
++        Queue_t * const pxMutex = ( Queue_t * ) xMutex;
++        configASSERT( pxMutex );
++        return xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
++    }
++
++#endif /* configUSE_RECURSIVE_MUTEXES */
++/*-----------------------------------------------------------*/
++
++#if ( configUSE_RECURSIVE_MUTEXES == 1 )
++
++    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex,
++                                         TickType_t xTicksToWait )
++    {
++        Queue_t * const pxMutex = ( Queue_t * ) xMutex;
++        configASSERT( pxMutex );
++        return xQueueSemaphoreTake( pxMutex, xTicksToWait );
++    }
++
++#endif /* configUSE_RECURSIVE_MUTEXES */
++/*-----------------------------------------------------------*/
++
++#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
++
++    QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
++                                                       const UBaseType_t uxInitialCount,
++                                                       StaticQueue_t * pxStaticQueue )
++    {
++        QueueHandle_t xHandle = NULL;
++
++        if( ( uxMaxCount != 0 ) &&
++            ( uxInitialCount <= uxMaxCount ) )
++        {
++            xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
++
++            if( xHandle != NULL )
++            {
++                ( ( rt_sem_t ) ( ( Queue_t * ) xHandle )->rt_ipc )->value = uxInitialCount;
++            }
++        }
++        else
++        {
++            configASSERT( xHandle );
++        }
++
++        return xHandle;
++    }
++
++#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
++/*-----------------------------------------------------------*/
++
++#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
++
++    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
++                                                 const UBaseType_t uxInitialCount )
++    {
++        QueueHandle_t xHandle = NULL;
++
++        if( ( uxMaxCount != 0 ) &&
++            ( uxInitialCount <= uxMaxCount ) )
++        {
++            xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
++
++            if( xHandle != NULL )
++            {
++                ( ( rt_sem_t ) ( ( Queue_t * ) xHandle )->rt_ipc )->value = uxInitialCount;
++            }
++        }
++        else
++        {
++            configASSERT( xHandle );
++        }
++
++        return xHandle;
++    }
++
++#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
++/*-----------------------------------------------------------*/
++
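++/*
++ * Note: semaphore releases are guarded by the wrapper's max_value so the
++ * FreeRTOS binary/counting limit is respected even though RT-Thread does not
++ * enforce a per-semaphore ceiling. queueSEND_TO_FRONT maps to rt_mq_urgent(),
++ * which currently has no blocking timeout (see the TODO below).
++ */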
++BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
++                              const void * const pvItemToQueue,
++                              TickType_t xTicksToWait,
++                              const BaseType_t xCopyPosition )
++{
++    Queue_t * const pxQueue = xQueue;
++    struct rt_ipc_object *pipc;
++    rt_uint8_t type;
++    rt_base_t level;
++    rt_err_t err = -RT_ERROR;
++
++    configASSERT( pxQueue );
++    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
++        {
++            configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
++        }
++    #endif
++
++    pipc = pxQueue->rt_ipc;
++    RT_ASSERT( pipc != RT_NULL );
++    type = rt_object_get_type( &pipc->parent );
++    if ( type == RT_Object_Class_Mutex )
++    {
++        err = rt_mutex_release( ( rt_mutex_t ) pipc );
++    }
++    else if ( type == RT_Object_Class_Semaphore )
++    {
++        level = rt_hw_interrupt_disable();
++        if ( ( ( rt_sem_t ) pipc )->value < ( ( struct rt_semaphore_wrapper * ) pipc )->max_value )
++        {
++            err = rt_sem_release( ( rt_sem_t ) pipc );
++        }
++        rt_hw_interrupt_enable( level );
++    }
++    else if ( type == RT_Object_Class_MessageQueue )
++    {
++        if ( xCopyPosition == queueSEND_TO_BACK )
++        {
++            err = rt_mq_send_wait( ( rt_mq_t ) pipc, pvItemToQueue, ( ( rt_mq_t ) pipc )->msg_size, ( rt_int32_t ) xTicksToWait );
++        }
++        else if ( xCopyPosition == queueSEND_TO_FRONT )
++        {
++            // TODO: need to implement the timeout for LIFO
++            err = rt_mq_urgent( ( rt_mq_t ) pipc, pvItemToQueue, ( ( rt_mq_t ) pipc )->msg_size );
++        }
++    }
++
++    return rt_err_to_freertos( err );
++}
++/*-----------------------------------------------------------*/
++
++BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
++                                     const void * const pvItemToQueue,
++                                     BaseType_t * const pxHigherPriorityTaskWoken,
++                                     const BaseType_t xCopyPosition )
++{
++    Queue_t * const pxQueue = xQueue;
++    struct rt_ipc_object *pipc;
++    rt_uint8_t type;
++    rt_err_t err = -RT_ERROR;
++
++    configASSERT( pxQueue );
++
++    pipc = pxQueue->rt_ipc;
++    RT_ASSERT( pipc != RT_NULL );
++    type = rt_object_get_type( &pipc->parent );
++    if ( type == RT_Object_Class_MessageQueue )
++    {
++        if ( xCopyPosition == queueSEND_TO_BACK )
++        {
++            err = rt_mq_send( ( rt_mq_t ) pipc, pvItemToQueue, ( ( rt_mq_t ) pipc )->msg_size);
++        }
++        else if ( xCopyPosition == queueSEND_TO_FRONT )
++        {
++            err = rt_mq_urgent( ( rt_mq_t ) pipc, pvItemToQueue, ( ( rt_mq_t ) pipc )->msg_size );
++        }
++    }
++
++    return rt_err_to_freertos( err );
++}
++/*-----------------------------------------------------------*/
++
++BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
++                              BaseType_t * const pxHigherPriorityTaskWoken )
++{
++    Queue_t * const pxQueue = xQueue;
++    struct rt_ipc_object *pipc;
++    rt_uint8_t type;
++    rt_base_t level;
++    rt_err_t err = -RT_ERROR;
++
++    configASSERT( pxQueue );
++
++    pipc = pxQueue->rt_ipc;
++    RT_ASSERT( pipc != RT_NULL );
++    type = rt_object_get_type( &pipc->parent );
++    RT_ASSERT( type != RT_Object_Class_Mutex );
++    if ( type == RT_Object_Class_Semaphore )
++    {
++        level = rt_hw_interrupt_disable();
++        if ( ( ( rt_sem_t ) pipc )->value < ( ( struct rt_semaphore_wrapper * ) pipc )->max_value )
++        {
++            err = rt_sem_release( ( rt_sem_t ) pipc );
++        }
++        rt_hw_interrupt_enable( level );
++    }
++    if ( pxHigherPriorityTaskWoken != NULL )
++    {
++        *pxHigherPriorityTaskWoken = pdFALSE;
++    }
++
++    return rt_err_to_freertos( err );
++}
++/*-----------------------------------------------------------*/
++
++BaseType_t xQueueReceive( QueueHandle_t xQueue,
++                          void * const pvBuffer,
++                          TickType_t xTicksToWait )
++{
++    Queue_t * const pxQueue = xQueue;
++    struct rt_ipc_object *pipc;
++    rt_uint8_t type;
++    rt_err_t err = -RT_ERROR;
++
++    /* Check the queue pointer is not NULL. */
++    configASSERT( ( pxQueue ) );
++
++    /* Cannot block if the scheduler is suspended. */
++    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
++        {
++            configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
++        }
++    #endif
++
++    pipc = pxQueue->rt_ipc;
++    RT_ASSERT( pipc != RT_NULL );
++    type = rt_object_get_type( &pipc->parent );
++    if ( type == RT_Object_Class_MessageQueue )
++    {
++        err = rt_mq_recv( ( rt_mq_t ) pipc, pvBuffer, ( ( rt_mq_t ) pipc )->msg_size, ( rt_int32_t ) xTicksToWait );
++    }
++
++    return rt_err_to_freertos( err );
++}
++/*-----------------------------------------------------------*/
++
++BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
++                                TickType_t xTicksToWait )
++{
++    Queue_t * const pxQueue = xQueue;
++    struct rt_ipc_object *pipc;
++    rt_uint8_t type;
++    rt_err_t err = -RT_ERROR;
++
++    /* Check the queue pointer is not NULL. */
++    configASSERT( ( pxQueue ) );
++
++    /* Cannot block if the scheduler is suspended. */
++    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
++        {
++            configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
++        }
++    #endif
++
++    pipc = pxQueue->rt_ipc;
++    RT_ASSERT( pipc != RT_NULL );
++    type = rt_object_get_type( &pipc->parent );
++    if ( type == RT_Object_Class_Mutex )
++    {
++        err = rt_mutex_take( ( rt_mutex_t ) pipc, ( rt_int32_t ) xTicksToWait );
++    }
++    else if ( type == RT_Object_Class_Semaphore )
++    {
++        err = rt_sem_take( ( rt_sem_t ) pipc, ( rt_int32_t ) xTicksToWait );
++    }
++
++    return rt_err_to_freertos( err );
++}
++/*-----------------------------------------------------------*/
++
++BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
++                                 void * const pvBuffer,
++                                 BaseType_t * const pxHigherPriorityTaskWoken )
++{
++    Queue_t * const pxQueue = xQueue;
++    struct rt_ipc_object *pipc;
++    rt_uint8_t type;
++    rt_err_t err = -RT_ERROR;
++
++    configASSERT( pxQueue );
++
++    pipc = pxQueue->rt_ipc;
++    RT_ASSERT( pipc != RT_NULL );
++    type = rt_object_get_type( &pipc->parent );
++    RT_ASSERT( type != RT_Object_Class_Mutex );
++    if ( type == RT_Object_Class_Semaphore )
++    {
++        err = rt_sem_take( ( rt_sem_t ) pipc, RT_WAITING_NO );
++    }
++    else if ( type == RT_Object_Class_MessageQueue )
++    {
++        err = rt_mq_recv( ( rt_mq_t ) pipc, pvBuffer, ( ( rt_mq_t ) pipc )->msg_size, RT_WAITING_NO );
++    }
++    if ( pxHigherPriorityTaskWoken != NULL )
++    {
++        *pxHigherPriorityTaskWoken = pdFALSE;
++    }
++
++    return rt_err_to_freertos( err );
++}
++/*-----------------------------------------------------------*/
++
++UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
++{
++    UBaseType_t uxReturn = 0;
++    struct rt_ipc_object *pipc;
++    rt_uint8_t type;
++    rt_base_t level;
++
++    configASSERT( xQueue );
++
++    pipc = xQueue->rt_ipc;
++    RT_ASSERT( pipc != RT_NULL );
++    type = rt_object_get_type( &pipc->parent );
++
++    level = rt_hw_interrupt_disable();
++
++    if ( type == RT_Object_Class_Mutex )
++    {
++        uxReturn = ( ( rt_mutex_t ) pipc )->value;
++    }
++    else if ( type == RT_Object_Class_Semaphore )
++    {
++        uxReturn = ( ( rt_sem_t ) pipc )->value;
++    }
++    else if ( type == RT_Object_Class_MessageQueue )
++    {
++        uxReturn = ( ( rt_mq_t ) pipc )->entry;
++    }
++
++    rt_hw_interrupt_enable( level );
++
++    return uxReturn;
++}
++/*-----------------------------------------------------------*/
++
++UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
++{
++    UBaseType_t uxReturn = 0;
++    struct rt_ipc_object *pipc;
++    rt_uint8_t type;
++    rt_base_t level;
++
++    configASSERT( xQueue );
++
++    pipc = xQueue->rt_ipc;
++    RT_ASSERT( pipc != RT_NULL );
++    type = rt_object_get_type( &pipc->parent );
++
++    level = rt_hw_interrupt_disable();
++
++    if ( type == RT_Object_Class_Mutex )
++    {
++        uxReturn = 1 - ( ( rt_mutex_t ) pipc )->value;
++    }
++    else if ( type == RT_Object_Class_Semaphore )
++    {
++        uxReturn = ( ( struct rt_semaphore_wrapper * ) pipc )->max_value - ( ( rt_sem_t ) pipc )->value;
++    }
++    else if ( type == RT_Object_Class_MessageQueue )
++    {
++        uxReturn = ( ( rt_mq_t ) pipc )->max_msgs - ( ( rt_mq_t ) pipc )->entry;
++    }
++
++    rt_hw_interrupt_enable( level );
++
++    return uxReturn;
++}
++/*-----------------------------------------------------------*/
++
++UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
++{
++    return uxQueueMessagesWaiting( xQueue );
++}
++/*-----------------------------------------------------------*/
++
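++/*
++ * Note: rt_object_is_systemobject() is true for objects initialised in place
++ * on caller-provided storage (the statically created case), so those are only
++ * detached here, while dynamically created objects are deleted and the
++ * wrapper memory allocated in xQueueGenericCreate() is freed.
++ */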
++void vQueueDelete( QueueHandle_t xQueue )
++{
++    Queue_t * const pxQueue = xQueue;
++    struct rt_ipc_object *pipc;
++    rt_uint8_t type;
++
++    configASSERT( pxQueue );
++
++    pipc = pxQueue->rt_ipc;
++    RT_ASSERT( pipc != RT_NULL );
++    type = rt_object_get_type( &pipc->parent );
++#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
++    if ( rt_object_is_systemobject( ( rt_object_t ) pipc ) )
++#endif
++    {
++    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++        if ( type == RT_Object_Class_Mutex )
++        {
++            rt_mutex_detach( ( rt_mutex_t ) pipc );
++        }
++        else if ( type == RT_Object_Class_Semaphore )
++        {
++            rt_sem_detach( ( rt_sem_t ) pipc );
++        }
++        else if ( type == RT_Object_Class_MessageQueue )
++        {
++            rt_mq_detach( ( rt_mq_t ) pipc );
++        }
++    #endif
++#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
++    }
++    else
++    {
++#endif
++    #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++        if ( type == RT_Object_Class_Mutex )
++        {
++            rt_mutex_delete( ( rt_mutex_t ) pipc );
++        }
++        else if ( type == RT_Object_Class_Semaphore )
++        {
++            /* Initialised with rt_sem_init in xQueueGenericCreate, so detach it and free the wrapper memory manually */
++            pipc->parent.type |= RT_Object_Class_Static;
++            rt_sem_detach( ( rt_sem_t ) pipc );
++            RT_KERNEL_FREE( pipc );
++        }
++        else if ( type == RT_Object_Class_MessageQueue )
++        {
++            rt_mq_delete( ( rt_mq_t ) pipc );
++        }
++        else
++        {
++            return;
++        }
++        RT_KERNEL_FREE( pxQueue );
++    #endif
++    }
++}
++/*-----------------------------------------------------------*/
++
++BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
++{
++    BaseType_t xReturn;
++
++    configASSERT( xQueue );
++
++    if( uxQueueMessagesWaiting( xQueue ) == ( UBaseType_t ) 0 )
++    {
++        xReturn = pdTRUE;
++    }
++    else
++    {
++        xReturn = pdFALSE;
++    }
++
++    return xReturn;
++}
++/*-----------------------------------------------------------*/
++
++BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
++{
++    BaseType_t xReturn;
++
++    configASSERT( xQueue );
++
++    if ( uxQueueSpacesAvailable( xQueue ) == ( UBaseType_t ) 0 )
++    {
++        xReturn = pdTRUE;
++    }
++    else
++    {
++        xReturn = pdFALSE;
++    }
++
++    return xReturn;
++}
++/*-----------------------------------------------------------*/
++
++/* Unimplemented */
++#include "esp_log.h"
++QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
++{
++    ESP_LOGE("freertos", "xQueueCreateSet unimplemented");
++    RT_ASSERT(0);
++    return NULL;
++}
++BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
++                           QueueSetHandle_t xQueueSet )
++{
++    ESP_LOGE("freertos", "xQueueAddToSet unimplemented");
++    RT_ASSERT(0);
++    return pdFAIL;
++}
++
++BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
++                                QueueSetHandle_t xQueueSet )
++{
++    ESP_LOGE("freertos", "xQueueRemoveFromSet unimplemented");
++    RT_ASSERT(0);
++    return pdFAIL;
++}
++
++QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
++                                            const TickType_t xTicksToWait )
++{
++    ESP_LOGE("freertos", "xQueueSelectFromSet unimplemented");
++    RT_ASSERT(0);
++    return NULL;
++}
++
++QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
++{
++    ESP_LOGE("freertos", "xQueueSelectFromSetFromISR unimplemented");
++    RT_ASSERT(0);
++    return NULL;
++}
++
++BaseType_t xQueuePeek( QueueHandle_t xQueue,
++                       void * const pvBuffer,
++                       TickType_t xTicksToWait )
++{
++    ESP_LOGE("freertos", "xQueuePeek unimplemented");
++    RT_ASSERT(0);
++    return pdFAIL;
++}
++
++BaseType_t xQueueOverwrite(QueueHandle_t xQueue, const void * pvItemToQueue)
++{
++    ESP_LOGE("freertos", "xQueueOverwrite unimplemented");
++    RT_ASSERT(0);
++    return pdFAIL;
++}
++
++BaseType_t xQueueOverwriteFromISR(QueueHandle_t xQueue, const void * pvItemToQueue, BaseType_t *pxHigherPriorityTaskWoken)
++{
++    ESP_LOGE("freertos", "xQueueOverwriteFromISR unimplemented");
++    RT_ASSERT(0);
++    return pdFAIL;
++}
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/tasks.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/tasks.c
+new file mode 100644
+index 0000000000..d3e8c27a09
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/tasks.c
+@@ -0,0 +1,1254 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++/* Standard includes. */
++#include <stdlib.h>
++#include <string.h>
++
++/* FreeRTOS includes. */
++#include "FreeRTOS.h"
++#include "task.h"
++
++//TODO: check configMAX_PRIORITIES == RT_THREAD_PRIORITY_MAX
++#define FREERTOS_PRIORITY_TO_RTTHREAD(priority)    ( configMAX_PRIORITIES - 1 - ( priority ) )
++#define RTTHREAD_PRIORITY_TO_FREERTOS(priority)    ( RT_THREAD_PRIORITY_MAX - 1 - ( priority ) )
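++/*
++ * Priority numbering is mirrored: FreeRTOS treats 0 as the lowest priority
++ * while RT-Thread treats 0 as the highest. Assuming the two maxima match as
++ * the TODO above requires (e.g. both equal 32), FreeRTOS priority 31 maps to
++ * RT-Thread priority 0, and vice versa.
++ */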
++
++/* Values that can be assigned to the ucNotifyState member of the TCB. */
++#define taskNOT_WAITING_NOTIFICATION              ( ( uint8_t ) 0 ) /* Must be zero as it is the initialised value. */
++#define taskWAITING_NOTIFICATION                  ( ( uint8_t ) 1 )
++#define taskNOTIFICATION_RECEIVED                 ( ( uint8_t ) 2 )
++
++/*
++ * Several functions take a TaskHandle_t parameter that can optionally be NULL,
++ * where NULL is used to indicate that the handle of the currently executing
++ * task should be used in place of the parameter.  This macro simply checks to
++ * see if the parameter is NULL and returns a pointer to the appropriate TCB.
++ */
++#define prvGetTCBFromHandle( pxHandle )    ( ( ( pxHandle ) == NULL ) ? ( xTaskGetCurrentTaskHandle() ) : ( pxHandle ) )
++
++/*
++ * Task control block.  A task control block (TCB) is allocated for each task,
++ * and stores task state information, including a pointer to the task's context
++ * (the task's run time environment, including register values)
++ */
++typedef struct tskTaskControlBlock
++{
++    struct rt_thread thread;
++    #if ( configUSE_APPLICATION_TASK_TAG == 1 )
++        TaskHookFunction_t pxTaskTag;
++    #endif
++    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
++        volatile uint32_t ulNotifiedValue[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];
++        volatile uint8_t ucNotifyState[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];
++    #endif
++    #if ( INCLUDE_xTaskAbortDelay == 1 )
++        uint8_t ucDelayAborted;
++    #endif
++} tskTCB;
++typedef tskTCB TCB_t;
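++
++/*
++ * Note: struct rt_thread is the first member of the TCB, so a TaskHandle_t can
++ * be cast directly to rt_thread_t throughout this file; the remaining members
++ * carry only the FreeRTOS-specific state (task tag, notifications, delay abort).
++ */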
++
++/* Other file private variables. --------------------------------*/
++static volatile BaseType_t xSchedulerRunning = pdFALSE;
++
++/*-----------------------------------------------------------*/
++
++/*
++ * Called after a Task_t structure has been allocated either statically or
++ * dynamically to fill in the structure's members.
++ */
++static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
++                                  const char * const pcName,
++                                  const uint32_t ulStackDepth,
++                                  void * const pvParameters,
++                                  UBaseType_t uxPriority,
++                                  TaskHandle_t * const pxCreatedTask,
++                                  TCB_t * pxNewTCB,
++                                  StackType_t * const puxStackBuffer );
++
++#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++
++    TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
++                                    const char * const pcName,
++                                    const uint32_t ulStackDepth,
++                                    void * const pvParameters,
++                                    UBaseType_t uxPriority,
++                                    StackType_t * const puxStackBuffer,
++                                    StaticTask_t * const pxTaskBuffer )
++    {
++        TCB_t * pxNewTCB;
++        TaskHandle_t xReturn = NULL;
++
++        configASSERT( puxStackBuffer != NULL );
++        configASSERT( pxTaskBuffer != NULL );
++
++        #if ( configASSERT_DEFINED == 1 )
++            {
++                /* Sanity check that the size of the structure used to declare a
++                 * variable of type StaticTask_t equals the size of the real task
++                 * structure. */
++                volatile size_t xSize = sizeof( StaticTask_t );
++                configASSERT( xSize == sizeof( TCB_t ) );
++                ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */
++            }
++        #endif /* configASSERT_DEFINED */
++
++        if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
++        {
++            pxNewTCB = ( TCB_t * ) pxTaskBuffer;
++            prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, puxStackBuffer );
++            rt_thread_startup( ( rt_thread_t ) pxNewTCB );
++        }
++
++        return xReturn;
++    }
++
++#endif /* SUPPORT_STATIC_ALLOCATION */
++
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++
++    BaseType_t xTaskCreate( TaskFunction_t pxTaskCode,
++                            const char * const pcName,
++                            const configSTACK_DEPTH_TYPE usStackDepth,
++                            void * const pvParameters,
++                            UBaseType_t uxPriority,
++                            TaskHandle_t * const pxCreatedTask )
++    {
++        TCB_t * pxNewTCB;
++        BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
++        void * stack_start = RT_NULL;
++
++        pxNewTCB = ( TCB_t * ) RT_KERNEL_MALLOC( sizeof( TCB_t ) );
++        if ( pxNewTCB != NULL )
++        {
++            stack_start = RT_KERNEL_MALLOC( usStackDepth * sizeof( StackType_t ) );
++            if ( stack_start != RT_NULL )
++            {
++                prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, ( StackType_t * ) stack_start );
++                xReturn = pdPASS;
++                /* Mark as dynamic */
++                ( ( struct rt_thread * ) pxNewTCB )->type &= ~RT_Object_Class_Static;
++                rt_thread_startup( ( rt_thread_t ) pxNewTCB );
++            }
++            else
++            {
++                RT_KERNEL_FREE( pxNewTCB );
++            }
++        }
++
++        return xReturn;
++    }
++
++#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
++/*-----------------------------------------------------------*/
++
++/* ESP32 */
++#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++
++    BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pvTaskCode,
++                            const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
++                            const uint32_t usStackDepth,
++                            void * const pvParameters,
++                            UBaseType_t uxPriority,
++                            TaskHandle_t * const pvCreatedTask,
++                            const BaseType_t xCoreID)
++    {
++        ( void ) xCoreID;
++        return xTaskCreate( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pvCreatedTask );
++    }
++
++#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
++/*-----------------------------------------------------------*/
++
++static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
++                                  const char * const pcName,
++                                  const uint32_t ulStackDepth,
++                                  void * const pvParameters,
++                                  UBaseType_t uxPriority,
++                                  TaskHandle_t * const pxCreatedTask,
++                                  TCB_t * pxNewTCB,
++                                  StackType_t * const puxStackBuffer )
++{
++    /* This is used as an array index so must ensure it's not too large. */
++    configASSERT( uxPriority < configMAX_PRIORITIES );
++
++    if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
++    {
++        uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
++    }
++
++    rt_thread_init( ( struct rt_thread * ) pxNewTCB, pcName, pxTaskCode, pvParameters,
++                    puxStackBuffer, ulStackDepth * sizeof( StackType_t ), FREERTOS_PRIORITY_TO_RTTHREAD( uxPriority ), 1 );
++
++#if ( configUSE_APPLICATION_TASK_TAG == 1 )
++    pxNewTCB->pxTaskTag = NULL;
++#endif
++
++#if ( configUSE_TASK_NOTIFICATIONS == 1 )
++    rt_memset( ( void * ) &( pxNewTCB->ulNotifiedValue[ 0 ] ), 0x00, sizeof( pxNewTCB->ulNotifiedValue ) );
++    rt_memset( ( void * ) &( pxNewTCB->ucNotifyState[ 0 ] ), 0x00, sizeof( pxNewTCB->ucNotifyState ) );
++#endif
++
++#if ( INCLUDE_xTaskAbortDelay == 1 )
++    pxNewTCB->ucDelayAborted = pdFALSE;
++#endif
++
++    if ( pxCreatedTask != NULL )
++    {
++        *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
++    }
++}
++/*-----------------------------------------------------------*/
++
++#if ( INCLUDE_vTaskDelete == 1 )
++
++    void vTaskDelete( TaskHandle_t xTaskToDelete )
++    {
++        rt_thread_t thread = ( rt_thread_t ) prvGetTCBFromHandle( xTaskToDelete );
++        if ( thread == RT_NULL )
++        {
++            thread = rt_thread_self();
++        }
++    #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
++        if ( rt_object_is_systemobject( ( rt_object_t ) thread ) )
++    #endif
++        {
++        #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++            rt_thread_detach( thread );
++        #endif
++    #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
++        }
++        else
++        {
++    #endif
++        #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++            rt_thread_delete( thread );
++        #endif
++        }
++
++        if ( thread == rt_thread_self() )
++        {
++            rt_schedule();
++        }
++    }
++
++#endif /* INCLUDE_vTaskDelete */
++/*-----------------------------------------------------------*/
++
++#if ( INCLUDE_xTaskDelayUntil == 1 )
++
++    BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
++                                const TickType_t xTimeIncrement )
++    {
++        BaseType_t xShouldDelay = pdFALSE;
++        rt_base_t level;
++        rt_tick_t cur_tick;
++
++        RT_ASSERT( pxPreviousWakeTime != RT_NULL );
++        RT_ASSERT( xTimeIncrement > 0U );
++
++        level = rt_hw_interrupt_disable();
++        cur_tick = rt_tick_get();
++        if (cur_tick - *pxPreviousWakeTime < xTimeIncrement)
++        {
++            rt_thread_delay_until( pxPreviousWakeTime, xTimeIncrement );
++            xShouldDelay = pdTRUE;
++        }
++        rt_hw_interrupt_enable( level );
++
++        return xShouldDelay;
++    }
++
++#endif /* INCLUDE_xTaskDelayUntil */
++/*-----------------------------------------------------------*/
++
++#if ( INCLUDE_vTaskDelay == 1 )
++
++    void vTaskDelay( const TickType_t xTicksToDelay )
++    {
++        rt_thread_delay( xTicksToDelay );
++    }
++
++#endif /* INCLUDE_vTaskDelay */
++/*-----------------------------------------------------------*/
++
++#if ( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )
++
++    eTaskState eTaskGetState( TaskHandle_t xTask )
++    {
++        eTaskState eReturn;
++        rt_thread_t thread = ( rt_thread_t ) xTask;
++        rt_base_t level;
++
++        configASSERT( xTask );
++
++        level = rt_hw_interrupt_disable();
++
++        switch ( thread->stat & RT_THREAD_STAT_MASK )
++        {
++            case RT_THREAD_READY:
++            {
++                eReturn = eReady;
++                break;
++            }
++            case RT_THREAD_SUSPEND:
++            {
++                /* If thread timer is activated it is blocked with a timeout */
++                if ( thread->thread_timer.parent.flag & RT_TIMER_FLAG_ACTIVATED )
++                {
++                    eReturn = eBlocked;
++                }
++                /* Otherwise it is suspended or blocked with an infinite timeout */
++                else
++                {
++                    eReturn = eSuspended;
++                }
++                break;
++            }
++            case RT_THREAD_RUNNING:
++            {
++                eReturn = eRunning;
++                break;
++            }
++            case RT_THREAD_CLOSE:
++            {
++                eReturn = eDeleted;
++                break;
++            }
++            default:
++                eReturn = eInvalid;
++        }
++
++        rt_hw_interrupt_enable( level );
++
++        return eReturn;
++    }
++
++#endif /* INCLUDE_eTaskGetState */
++/*-----------------------------------------------------------*/
++
++#if ( INCLUDE_uxTaskPriorityGet == 1 )
++
++    UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
++    {
++        UBaseType_t uxReturn;
++        rt_thread_t thread = ( rt_thread_t ) prvGetTCBFromHandle( xTask );
++        rt_base_t level;
++
++        level = rt_hw_interrupt_disable();
++        uxReturn = thread->current_priority;
++        rt_hw_interrupt_enable( level );
++
++        return RTTHREAD_PRIORITY_TO_FREERTOS( uxReturn );
++    }
++
++#endif /* INCLUDE_uxTaskPriorityGet */
++/*-----------------------------------------------------------*/
++
++#if ( INCLUDE_uxTaskPriorityGet == 1 )
++
++    UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
++    {
++        return uxTaskPriorityGet( xTask );
++    }
++
++#endif /* INCLUDE_uxTaskPriorityGet */
++/*-----------------------------------------------------------*/
++
++#if ( INCLUDE_vTaskPrioritySet == 1 )
++
++    void vTaskPrioritySet( TaskHandle_t xTask,
++                           UBaseType_t uxNewPriority )
++    {
++        extern rt_thread_t rt_current_thread;
++        rt_thread_t thread;
++        rt_uint8_t current_priority;
++        rt_bool_t need_schedule = RT_FALSE;
++        rt_base_t level;
++
++        configASSERT( uxNewPriority < configMAX_PRIORITIES );
++
++        /* Ensure the new priority is valid. */
++        if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
++        {
++            uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
++        }
++        uxNewPriority = FREERTOS_PRIORITY_TO_RTTHREAD( uxNewPriority );
++
++        level = rt_hw_interrupt_disable();
++
++        thread = ( rt_thread_t ) prvGetTCBFromHandle( xTask );
++        current_priority = thread->current_priority;
++        if ( current_priority != uxNewPriority )
++        {
++            rt_thread_control( thread, RT_THREAD_CTRL_CHANGE_PRIORITY, &uxNewPriority);
++            if ( uxNewPriority < current_priority )
++            {
++                /* The priority of a task other than the currently running task is being raised.
++                 * Need to schedule if the priority is raised above that of the running task */
++                if ( thread != rt_current_thread && uxNewPriority <= rt_current_thread->current_priority )
++                {
++                    need_schedule = RT_TRUE;
++                }
++            }
++            /* Setting the priority of the running task down means
++             * there may now be another task of higher priority that
++             * is ready to execute. */
++            else if ( thread == rt_current_thread )
++            {
++                need_schedule = RT_TRUE;
++            }
++        }
++
++        rt_hw_interrupt_enable( level );
++
++        if ( need_schedule == RT_TRUE )
++        {
++            rt_schedule();
++        }
++    }
++
++#endif /* INCLUDE_vTaskPrioritySet */
++/*-----------------------------------------------------------*/
++
++#if ( INCLUDE_vTaskSuspend == 1 )
++
++    void vTaskSuspend( TaskHandle_t xTaskToSuspend )
++    {
++        rt_thread_t thread = ( rt_thread_t ) prvGetTCBFromHandle( xTaskToSuspend );
++        if ( rt_thread_suspend( thread ) == RT_EOK )
++        {
++            rt_schedule();
++        }
++    }
++
++#endif /* INCLUDE_vTaskSuspend */
++/*-----------------------------------------------------------*/
++
++#if ( INCLUDE_vTaskSuspend == 1 )
++
++    void vTaskResume( TaskHandle_t xTaskToResume )
++    {
++        rt_thread_t thread = ( rt_thread_t ) xTaskToResume;
++        rt_bool_t need_schedule = RT_FALSE;
++        rt_base_t level;
++
++        /* It does not make sense to resume the calling task. */
++        configASSERT( xTaskToResume );
++
++        if ( thread != NULL && thread != rt_thread_self() )
++        {
++            level = rt_hw_interrupt_disable();
++            /* A task with higher priority than the current running task is ready */
++            if ( rt_thread_resume( thread ) == RT_EOK && thread->current_priority <= rt_thread_self()->current_priority )
++            {
++                need_schedule = RT_TRUE;
++            }
++            rt_hw_interrupt_enable( level );
++        }
++        if (need_schedule == RT_TRUE)
++        {
++            rt_schedule();
++        }
++    }
++
++#endif /* INCLUDE_vTaskSuspend */
++
++/*-----------------------------------------------------------*/
++
++#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
++
++    BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
++    {
++        vTaskResume( xTaskToResume );
++        return pdFALSE;
++    }
++
++#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
++/*-----------------------------------------------------------*/
++
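++/*
++ * Note: the wrapper leaves actual scheduling to RT-Thread, so this call only
++ * records that the scheduler is considered started; xTaskGetSchedulerState()
++ * reports this flag back to FreeRTOS code.
++ */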
++void vTaskStartScheduler( void )
++{
++    xSchedulerRunning = pdTRUE;
++}
++/*-----------------------------------------------------------*/
++
++void vTaskEndScheduler( void )
++{
++    xSchedulerRunning = pdFALSE;
++    vPortEndScheduler();
++}
++/*----------------------------------------------------------*/
++
++#if ( configUSE_NEWLIB_REENTRANT == 1 )
++struct _reent* __getreent(void) {
++    return _GLOBAL_REENT;
++}
++#endif
++
++void vTaskSuspendAll( void )
++{
++    rt_enter_critical();
++}
++/*----------------------------------------------------------*/
++
++BaseType_t xTaskResumeAll( void )
++{
++    rt_exit_critical();
++    return pdFALSE;
++}
++/*-----------------------------------------------------------*/
++
++TickType_t xTaskGetTickCount( void )
++{
++    return rt_tick_get();
++}
++/*-----------------------------------------------------------*/
++
++TickType_t xTaskGetTickCountFromISR( void )
++{
++    return rt_tick_get();
++}
++/*-----------------------------------------------------------*/
++
++UBaseType_t uxTaskGetNumberOfTasks( void )
++{
++    UBaseType_t uxReturn = 0;
++    rt_base_t level;
++    struct rt_object_information *information;
++    struct rt_list_node *node = RT_NULL;
++
++    information = rt_object_get_information( RT_Object_Class_Thread );
++    RT_ASSERT( information != RT_NULL );
++
++    level = rt_hw_interrupt_disable();
++
++    rt_list_for_each( node, &( information->object_list ) )
++    {
++        uxReturn += 1;
++    }
++
++    rt_hw_interrupt_enable( level );
++
++    return uxReturn;
++}
++/*-----------------------------------------------------------*/
++
++char * pcTaskGetName( TaskHandle_t xTaskToQuery )
++{
++    rt_thread_t thread = ( rt_thread_t ) prvGetTCBFromHandle( xTaskToQuery );
++    return &( thread->name[ 0 ] );
++}
++/*-----------------------------------------------------------*/
++
++#if ( INCLUDE_xTaskGetHandle == 1 )
++
++    TaskHandle_t xTaskGetHandle( const char * pcNameToQuery )
++    {
++        return ( TaskHandle_t ) rt_thread_find( ( char * ) pcNameToQuery );
++    }
++
++#endif /* INCLUDE_xTaskGetHandle */
++/*-----------------------------------------------------------*/
++
++#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
++
++    TaskHandle_t xTaskGetIdleTaskHandle( void )
++    {
++        return ( TaskHandle_t ) rt_thread_find( "tidle0" );
++    }
++
++#endif /* INCLUDE_xTaskGetIdleTaskHandle */
++/*----------------------------------------------------------*/
++
++#if ( INCLUDE_xTaskAbortDelay == 1 )
++
++    BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
++    {
++        TCB_t * pxTCB = xTask;
++        BaseType_t xReturn;
++        rt_thread_t thread = ( rt_thread_t ) xTask;
++        rt_bool_t need_schedule = RT_FALSE;
++        rt_base_t level;
++
++        configASSERT( pxTCB );
++
++        level = rt_hw_interrupt_disable();
++
++        if ( eTaskGetState( xTask ) == eBlocked )
++        {
++            rt_thread_resume( thread );
++            thread->error = -RT_ETIMEOUT;
++            pxTCB->ucDelayAborted = pdTRUE;
++            if ( thread->current_priority < rt_thread_self()->current_priority )
++            {
++                need_schedule = RT_TRUE;
++            }
++            xReturn = pdPASS;
++        }
++        else
++        {
++            xReturn = pdFAIL;
++        }
++
++        rt_hw_interrupt_enable( level );
++
++        if ( need_schedule == RT_TRUE )
++        {
++            rt_schedule();
++        }
++
++        return xReturn;
++    }
++
++#endif /* INCLUDE_xTaskAbortDelay */
++/*----------------------------------------------------------*/
++
++#if ( configUSE_APPLICATION_TASK_TAG == 1 )
++
++    void vTaskSetApplicationTaskTag( TaskHandle_t xTask,
++                                     TaskHookFunction_t pxHookFunction )
++    {
++        TCB_t * xTCB = prvGetTCBFromHandle( xTask );
++        rt_base_t level;
++
++        level = rt_hw_interrupt_disable();
++        xTCB->pxTaskTag = pxHookFunction;
++        rt_hw_interrupt_enable( level );
++    }
++
++#endif /* configUSE_APPLICATION_TASK_TAG */
++/*-----------------------------------------------------------*/
++
++#if ( configUSE_APPLICATION_TASK_TAG == 1 )
++
++    TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
++    {
++        TaskHookFunction_t xReturn;
++        TCB_t * xTCB = prvGetTCBFromHandle( xTask );
++        rt_base_t level;
++
++        level = rt_hw_interrupt_disable();
++        xReturn = xTCB->pxTaskTag;
++        rt_hw_interrupt_enable( level );
++
++        return xReturn;
++    }
++
++#endif /* configUSE_APPLICATION_TASK_TAG */
++/*-----------------------------------------------------------*/
++
++#if ( configUSE_APPLICATION_TASK_TAG == 1 )
++
++    TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
++    {
++        return xTaskGetApplicationTaskTag( xTask );
++    }
++
++#endif /* configUSE_APPLICATION_TASK_TAG */
++/*-----------------------------------------------------------*/
++
++#if ( configUSE_APPLICATION_TASK_TAG == 1 )
++
++    BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask,
++                                             void * pvParameter )
++    {
++        BaseType_t xReturn;
++        TCB_t * xTCB = prvGetTCBFromHandle( xTask );
++
++        if( xTCB->pxTaskTag != NULL )
++        {
++            xReturn = xTCB->pxTaskTag( pvParameter );
++        }
++        else
++        {
++            xReturn = pdFAIL;
++        }
++
++        return xReturn;
++    }
++
++#endif /* configUSE_APPLICATION_TASK_TAG */
++/*-----------------------------------------------------------*/
++
++void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
++{
++    rt_base_t level;
++
++    configASSERT( pxTimeOut );
++    level = rt_hw_interrupt_disable();
++    pxTimeOut->xOverflowCount = 0;
++    pxTimeOut->xTimeOnEntering = ( TickType_t ) rt_tick_get();
++    rt_hw_interrupt_enable( level );
++}
++/*-----------------------------------------------------------*/
++
++void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
++{
++    /* For internal use only as it does not use a critical section. */
++    pxTimeOut->xOverflowCount = 0;
++    pxTimeOut->xTimeOnEntering = ( TickType_t ) rt_tick_get();
++}
++/*-----------------------------------------------------------*/
++
++BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
++                                 TickType_t * const pxTicksToWait )
++{
++    TCB_t * pxCurrentTCB = ( TCB_t * ) rt_thread_self();
++    BaseType_t xReturn;
++    rt_base_t level;
++
++    configASSERT( pxTimeOut );
++    configASSERT( pxTicksToWait );
++
++    level = rt_hw_interrupt_disable();
++    /* Minor optimisation.  The tick count cannot change in this block. */
++    const TickType_t xConstTickCount = ( TickType_t ) rt_tick_get();
++    const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;
++
++#if ( INCLUDE_xTaskAbortDelay == 1 )
++    if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE )
++    {
++        /* The delay was aborted, which is not the same as a time out,
++         * but has the same result. */
++        pxCurrentTCB->ucDelayAborted = pdFALSE;
++        xReturn = pdTRUE;
++    }
++    else
++#endif
++
++#if ( INCLUDE_vTaskSuspend == 1 )
++    if( *pxTicksToWait == portMAX_DELAY )
++    {
++        /* If INCLUDE_vTaskSuspend is set to 1 and the block time
++         * specified is the maximum block time then the task should block
++         * indefinitely, and therefore never time out. */
++        xReturn = pdFALSE;
++    }
++    else
++#endif
++
++    if( xElapsedTime < *pxTicksToWait )
++    {
++        /* Not a genuine timeout. Adjust parameters for time remaining. */
++        *pxTicksToWait -= xElapsedTime;
++        vTaskInternalSetTimeOutState( pxTimeOut );
++        xReturn = pdFALSE;
++    }
++    else
++    {
++        *pxTicksToWait = ( TickType_t ) 0;
++        xReturn = pdTRUE;
++    }
++    rt_hw_interrupt_enable( level );
++
++    return xReturn;
++}
++/*-----------------------------------------------------------*/
++
++#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) )
++
++    TaskHandle_t xTaskGetCurrentTaskHandle( void )
++    {
++        TaskHandle_t xReturn;
++
++        /* A critical section is not required as this is not called from
++         * an interrupt and the current TCB will always be the same for any
++         * individual execution thread. */
++        xReturn = ( TaskHandle_t ) rt_thread_self();
++
++        return xReturn;
++    }
++
++#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) ) */
++/*-----------------------------------------------------------*/
++
++#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
++
++    BaseType_t xTaskGetSchedulerState( void )
++    {
++        BaseType_t xReturn;
++
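++        /* RT-Thread has no separate "scheduler suspended" state; a non-zero
++         * scheduler lock nesting level (rt_enter_critical()) is reported as
++         * taskSCHEDULER_SUSPENDED instead. */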
++        if( xSchedulerRunning == pdFALSE )
++        {
++            xReturn = taskSCHEDULER_NOT_STARTED;
++        }
++        else
++        {
++            if( rt_critical_level() == 0 )
++            {
++                xReturn = taskSCHEDULER_RUNNING;
++            }
++            else
++            {
++                xReturn = taskSCHEDULER_SUSPENDED;
++            }
++        }
++
++        return xReturn;
++    }
++
++#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
++/*-----------------------------------------------------------*/
++
++#if ( configUSE_TASK_NOTIFICATIONS == 1 )
++
++    uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWait,
++                                      BaseType_t xClearCountOnExit,
++                                      TickType_t xTicksToWait )
++    {
++        uint32_t ulReturn;
++        TCB_t * pxCurrentTCB = ( TCB_t * ) rt_thread_self();
++        rt_thread_t thread = ( rt_thread_t ) pxCurrentTCB;
++        rt_base_t level;
++
++        configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );
++
++        level = rt_hw_interrupt_disable();
++        /* Only block if the notification count is not already non-zero. */
++        if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] == 0UL )
++        {
++            /* Mark this task as waiting for a notification. */
++            pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION;
++
++            if( xTicksToWait > ( TickType_t ) 0 )
++            {
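++                /* Emulate the FreeRTOS Blocked state with RT-Thread
++                 * primitives: suspend the current thread, arm its built-in
++                 * thread timer as a one-shot timeout, then yield via
++                 * rt_schedule().  The thread runs again when
++                 * xTaskGenericNotify() resumes it or the timer expires. */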
++                rt_thread_suspend( thread );
++                if ( ( rt_int32_t ) xTicksToWait > 0 )
++                {
++                    rt_timer_control(&(thread->thread_timer),
++                                     RT_TIMER_CTRL_SET_TIME,
++                                     &xTicksToWait);
++                    rt_timer_start(&(thread->thread_timer));
++                }
++                rt_hw_interrupt_enable(level);
++                rt_schedule();
++                /* Clear thread error. */
++                thread->error = RT_EOK;
++            }
++        }
++        rt_hw_interrupt_enable( level );
++
++        level = rt_hw_interrupt_disable();
++        ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ];
++
++        if( ulReturn != 0UL )
++        {
++            if( xClearCountOnExit != pdFALSE )
++            {
++                pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] = 0UL;
++            }
++            else
++            {
++                pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] = ulReturn - ( uint32_t ) 1;
++            }
++        }
++
++        pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
++        rt_hw_interrupt_enable( level );
++
++        return ulReturn;
++    }
++
++#endif /* configUSE_TASK_NOTIFICATIONS */
++/*-----------------------------------------------------------*/
++
++#if ( configUSE_TASK_NOTIFICATIONS == 1 )
++
++    BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWait,
++                                       uint32_t ulBitsToClearOnEntry,
++                                       uint32_t ulBitsToClearOnExit,
++                                       uint32_t * pulNotificationValue,
++                                       TickType_t xTicksToWait )
++    {
++        BaseType_t xReturn;
++        TCB_t * pxCurrentTCB = ( TCB_t * ) rt_thread_self();
++        rt_thread_t thread = ( rt_thread_t ) pxCurrentTCB;
++        rt_base_t level;
++
++        configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );
++
++        level = rt_hw_interrupt_disable();
++        /* Only block if a notification is not already pending. */
++        if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED )
++        {
++            /* Clear bits in the task's notification value as bits may get
++             * set by the notifying task or interrupt.  This can be used to
++             * clear the value to zero. */
++            pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnEntry;
++
++            /* Mark this task as waiting for a notification. */
++            pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION;
++
++            if( xTicksToWait > ( TickType_t ) 0 )
++            {
++                rt_thread_suspend( thread );
++                if ( ( rt_int32_t ) xTicksToWait > 0 )
++                {
++                    rt_timer_control(&(thread->thread_timer),
++                                     RT_TIMER_CTRL_SET_TIME,
++                                     &xTicksToWait);
++                    rt_timer_start(&(thread->thread_timer));
++                }
++                rt_hw_interrupt_enable(level);
++                rt_schedule();
++                /* Clear thread error. It is not used to determine the function return value. */
++                thread->error = RT_EOK;
++            }
++            else
++            {
++                rt_hw_interrupt_enable( level );
++            }
++        }
++        else
++        {
++            rt_hw_interrupt_enable( level );
++        }
++
++        level = rt_hw_interrupt_disable();
++
++        if( pulNotificationValue != NULL )
++        {
++            /* Output the current notification value, which may or may not
++             * have changed. */
++            *pulNotificationValue = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ];
++        }
++
++        /* If ucNotifyValue is set then either the task never entered the
++         * blocked state (because a notification was already pending) or the
++         * task unblocked because of a notification.  Otherwise the task
++         * unblocked because of a timeout. */
++        if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED )
++        {
++            /* A notification was not received. */
++            xReturn = pdFALSE;
++        }
++        else
++        {
++            /* A notification was already pending or a notification was
++             * received while the task was waiting. */
++            pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnExit;
++            xReturn = pdTRUE;
++        }
++
++        pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
++        rt_hw_interrupt_enable( level );
++
++        return xReturn;
++    }
++
++#endif /* configUSE_TASK_NOTIFICATIONS */
++/*-----------------------------------------------------------*/
++
++#if ( configUSE_TASK_NOTIFICATIONS == 1 )
++
++    BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
++                                   UBaseType_t uxIndexToNotify,
++                                   uint32_t ulValue,
++                                   eNotifyAction eAction,
++                                   uint32_t * pulPreviousNotificationValue )
++    {
++        TCB_t * pxTCB;
++        BaseType_t xReturn = pdPASS;
++        uint8_t ucOriginalNotifyState;
++        rt_base_t level;
++
++        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
++        configASSERT( xTaskToNotify );
++        pxTCB = xTaskToNotify;
++
++        level = rt_hw_interrupt_disable();
++
++        if( pulPreviousNotificationValue != NULL )
++        {
++            *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
++        }
++
++        ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
++
++        pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;
++
++        switch( eAction )
++        {
++            case eSetBits:
++                pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
++                break;
++
++            case eIncrement:
++                ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
++                break;
++
++            case eSetValueWithOverwrite:
++                pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
++                break;
++
++            case eSetValueWithoutOverwrite:
++
++                if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
++                {
++                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
++                }
++                else
++                {
++                    /* The value could not be written to the task. */
++                    xReturn = pdFAIL;
++                }
++
++                break;
++
++            case eNoAction:
++
++                /* The task is being notified without its notify value being
++                 * updated. */
++                break;
++
++            default:
++
++                /* Should not get here if all enums are handled.
++                 * Artificially force an assert by testing a value the
++                 * compiler can't assume is const. */
++                configASSERT( xTaskToNotify == NULL );
++
++                break;
++        }
++
++
++        /* If the task is in the blocked state specifically to wait for a
++         * notification then unblock it now. */
++        if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
++        {
++            rt_thread_resume( ( rt_thread_t ) pxTCB );
++
++            if( ( ( rt_thread_t ) pxTCB )->current_priority < rt_thread_self()->current_priority )
++            {
++                /* The notified task has a priority above the currently
++                 * executing task so a schedule is required. */
++                rt_schedule();
++            }
++        }
++        rt_hw_interrupt_enable( level );
++
++        return xReturn;
++    }
++
++#endif /* configUSE_TASK_NOTIFICATIONS */
++/*-----------------------------------------------------------*/
++
++#if ( configUSE_TASK_NOTIFICATIONS == 1 )
++
++    BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
++                                          UBaseType_t uxIndexToNotify,
++                                          uint32_t ulValue,
++                                          eNotifyAction eAction,
++                                          uint32_t * pulPreviousNotificationValue,
++                                          BaseType_t * pxHigherPriorityTaskWoken )
++    {
++        BaseType_t xReturn;
++
++        xReturn = xTaskGenericNotify( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue );
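++        /* xTaskGenericNotify() already calls rt_schedule() when the notified
++         * thread has a higher priority, so the caller never needs to request
++         * a context switch and the hint is always reported as pdFALSE. */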
++        if ( pxHigherPriorityTaskWoken != NULL )
++        {
++            *pxHigherPriorityTaskWoken = pdFALSE;
++        }
++
++        return xReturn;
++    }
++
++#endif /* configUSE_TASK_NOTIFICATIONS */
++/*-----------------------------------------------------------*/
++
++#if ( configUSE_TASK_NOTIFICATIONS == 1 )
++
++    void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
++                                        UBaseType_t uxIndexToNotify,
++                                        BaseType_t * pxHigherPriorityTaskWoken )
++    {
++        xTaskNotifyGiveIndexed( xTaskToNotify, uxIndexToNotify );
++        if ( pxHigherPriorityTaskWoken != NULL )
++        {
++            *pxHigherPriorityTaskWoken = pdFALSE;
++        }
++    }
++
++#endif /* configUSE_TASK_NOTIFICATIONS */
++/*-----------------------------------------------------------*/
++
++#if ( configUSE_TASK_NOTIFICATIONS == 1 )
++
++    BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
++                                             UBaseType_t uxIndexToClear )
++    {
++        TCB_t * pxTCB;
++        BaseType_t xReturn;
++        rt_base_t level;
++
++        configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );
++
++        /* If null is passed in here then it is the calling task that is having
++         * its notification state cleared. */
++        pxTCB = prvGetTCBFromHandle( xTask );
++
++        level = rt_hw_interrupt_disable();
++
++        if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED )
++        {
++            pxTCB->ucNotifyState[ uxIndexToClear ] = taskNOT_WAITING_NOTIFICATION;
++            xReturn = pdPASS;
++        }
++        else
++        {
++            xReturn = pdFAIL;
++        }
++
++        rt_hw_interrupt_enable( level );
++
++        return xReturn;
++    }
++
++#endif /* configUSE_TASK_NOTIFICATIONS */
++/*-----------------------------------------------------------*/
++
++#if ( configUSE_TASK_NOTIFICATIONS == 1 )
++
++    uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
++                                            UBaseType_t uxIndexToClear,
++                                            uint32_t ulBitsToClear )
++    {
++        TCB_t * pxTCB;
++        uint32_t ulReturn;
++        rt_base_t level;
++
++        /* If null is passed in here then it is the calling task that is having
++         * its notification state cleared. */
++        pxTCB = prvGetTCBFromHandle( xTask );
++
++        level = rt_hw_interrupt_disable();
++
++        /* Return the notification as it was before the bits were cleared,
++         * then clear the bit mask. */
++        ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ];
++        pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear;
++
++        rt_hw_interrupt_enable( level );
++
++        return ulReturn;
++    }
++
++#endif /* configUSE_TASK_NOTIFICATIONS */
++/*-----------------------------------------------------------*/
++
++#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
++
++/* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
++ * same except for their return type.  Using configSTACK_DEPTH_TYPE allows the
++ * user to determine the return type.  It gets around the problem of the value
++ * overflowing on 8-bit types without breaking backward compatibility for
++ * applications that expect an 8-bit return type. */
++    configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
++    {
++        uint32_t ulCount = 0U;
++        rt_thread_t thread = ( rt_thread_t ) prvGetTCBFromHandle( xTask );
++        rt_uint8_t * stack_addr = thread->stack_addr;
++
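++    /* RT-Thread fills a thread's stack with '#' when the thread is created,
++     * so the run of untouched '#' bytes at the unused end of the stack gives
++     * the minimum free stack ever seen.  The byte count is converted to
++     * StackType_t words below to match the FreeRTOS return convention. */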
++    #ifdef ARCH_CPU_STACK_GROWS_UPWARD
++        stack_addr = stack_addr + thread->stack_size - 1;
++        while ( *stack_addr == '#' )
++        {
++            ulCount += 1;
++            stack_addr -= 1;
++        }
++    #else
++        while ( *stack_addr == '#' )
++        {
++            ulCount += 1;
++            stack_addr += 1;
++        }
++    #endif
++
++        ulCount /= ( uint32_t ) sizeof( StackType_t );
++
++        return ( configSTACK_DEPTH_TYPE ) ulCount;
++    }
++
++#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
++/*-----------------------------------------------------------*/
++
++#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
++
++    UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
++    {
++        return ( UBaseType_t ) uxTaskGetStackHighWaterMark2( xTask );
++    }
++
++#endif /* INCLUDE_uxTaskGetStackHighWaterMark */
++/*-----------------------------------------------------------*/
++
++
++/* ESP32 */
++BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
++{
++    ( void ) xTask;
++    return 0;
++}
++
++TaskHandle_t xTaskGetCurrentTaskHandleForCPU( BaseType_t cpuid )
++{
++    ( void ) cpuid;
++    return xTaskGetCurrentTaskHandle();
++}
++
++TaskHandle_t xTaskGetIdleTaskHandleForCPU( UBaseType_t cpuid )
++{
++    ( void ) cpuid;
++    return xTaskGetIdleTaskHandle();
++}
++
++/* Unimplemented */
++#include "esp_log.h"
++#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
++void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
++                                            BaseType_t xIndex,
++                                            void * pvValue )
++{
++    ESP_LOGE("freertos", "vTaskSetThreadLocalStoragePointer unimplemented");
++    RT_ASSERT(0);
++}
++void * pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
++                                               BaseType_t xIndex )
++{
++    ESP_LOGE("freertos", "pvTaskGetThreadLocalStoragePointer unimplemented");
++    RT_ASSERT(0);
++    return NULL;
++}
++#if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
++typedef void (*TlsDeleteCallbackFunction_t)( int, void * );
++void vTaskSetThreadLocalStoragePointerAndDelCallback( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue, TlsDeleteCallbackFunction_t pvDelCallback)
++{
++    ESP_LOGE("freertos", "vTaskSetThreadLocalStoragePointerAndDelCallback unimplemented");
++    RT_ASSERT(0);
++}
++#endif
++#endif
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/timers.c b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/timers.c
+new file mode 100644
+index 0000000000..b5b5693d53
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/FreeRTOS/timers.c
+@@ -0,0 +1,328 @@
++/*
++ * FreeRTOS Kernel V10.4.6
++ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
++ *
++ * SPDX-License-Identifier: MIT
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy of
++ * this software and associated documentation files (the "Software"), to deal in
++ * the Software without restriction, including without limitation the rights to
++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
++ * the Software, and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in all
++ * copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * https://www.FreeRTOS.org
++ * https://github.com/FreeRTOS
++ *
++ */
++
++/* Standard includes. */
++#include <stdlib.h>
++
++#include "FreeRTOS.h"
++#include "task.h"
++#include "queue.h"
++#include "timers.h"
++
++/* This entire source file will be skipped if the application is not configured
++ * to include software timer functionality.  This #if is closed at the very bottom
++ * of this file.  If you want to include software timer functionality then ensure
++ * configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. */
++#if ( configUSE_TIMERS == 1 )
++
++    typedef void (* rt_timer_callback_t)(void *);
++
++/* The definition of the timers themselves. */
++    typedef struct tmrTimerControl
++    {
++        struct rt_timer timer;
++        void * pvTimerID;                           /*<< An ID to identify the timer.  This allows the timer to be identified when the same callback is used for multiple timers. */
++    } xTIMER;
++
++    typedef xTIMER Timer_t;
++
++/*-----------------------------------------------------------*/
++
++    #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++
++        TimerHandle_t xTimerCreate( const char * const pcTimerName,
++                                    const TickType_t xTimerPeriodInTicks,
++                                    const UBaseType_t uxAutoReload,
++                                    void * const pvTimerID,
++                                    TimerCallbackFunction_t pxCallbackFunction )
++        {
++            Timer_t * pxNewTimer;
++            rt_uint8_t flag = RT_TIMER_FLAG_SOFT_TIMER;
++
++            pxNewTimer = ( Timer_t * ) RT_KERNEL_MALLOC( sizeof( Timer_t ) );
++
++            if( pxNewTimer != RT_NULL )
++            {
++                if ( uxAutoReload != pdFALSE )
++                {
++                    flag |= RT_TIMER_FLAG_PERIODIC;
++                }
++                else
++                {
++                    flag |= RT_TIMER_FLAG_ONE_SHOT;
++                }
++                rt_timer_init( ( rt_timer_t ) pxNewTimer, pcTimerName, ( rt_timer_callback_t ) pxCallbackFunction, pxNewTimer, xTimerPeriodInTicks, flag );
++                pxNewTimer->pvTimerID = pvTimerID;
++                /* Mark as dynamic so we can distinguish when deleting */
++                ( ( rt_timer_t ) pxNewTimer )->parent.type &= ~RT_Object_Class_Static;
++            }
++
++            return pxNewTimer;
++        }
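++
++        /* Usage sketch (illustration only, vBlinkCallback is hypothetical):
++         *
++         *     TimerHandle_t t = xTimerCreate( "blink", pdMS_TO_TICKS( 500 ),
++         *                                     pdTRUE, NULL, vBlinkCallback );
++         *     if( t != NULL )
++         *     {
++         *         xTimerStart( t, 0 );
++         *     }
++         *
++         * xTimerStart() reaches rt_timer_start() through
++         * xTimerGenericCommand() below. */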
++
++    #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
++/*-----------------------------------------------------------*/
++
++    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++
++        TimerHandle_t xTimerCreateStatic( const char * const pcTimerName,
++                                          const TickType_t xTimerPeriodInTicks,
++                                          const UBaseType_t uxAutoReload,
++                                          void * const pvTimerID,
++                                          TimerCallbackFunction_t pxCallbackFunction,
++                                          StaticTimer_t * pxTimerBuffer )
++        {
++            Timer_t * pxNewTimer;
++            rt_uint8_t flag = RT_TIMER_FLAG_SOFT_TIMER;
++
++            #if ( configASSERT_DEFINED == 1 )
++                {
++                    /* Sanity check that the size of the structure used to declare a
++                     * variable of type StaticTimer_t equals the size of the real timer
++                     * structure. */
++                    volatile size_t xSize = sizeof( StaticTimer_t );
++                    configASSERT( xSize == sizeof( Timer_t ) );
++                    ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */
++                }
++            #endif /* configASSERT_DEFINED */
++
++            /* A pointer to a StaticTimer_t structure MUST be provided, use it. */
++            configASSERT( pxTimerBuffer );
++            pxNewTimer = ( Timer_t * ) pxTimerBuffer;
++
++            if( pxNewTimer != NULL )
++            {
++                if ( uxAutoReload != pdFALSE )
++                {
++                    flag |= RT_TIMER_FLAG_PERIODIC;
++                }
++                else
++                {
++                    flag |= RT_TIMER_FLAG_ONE_SHOT;
++                }
++                rt_timer_init( ( rt_timer_t ) pxNewTimer, pcTimerName, ( rt_timer_callback_t ) pxCallbackFunction, pxNewTimer, xTimerPeriodInTicks, flag );
++                pxNewTimer->pvTimerID = pvTimerID;
++            }
++
++            return pxNewTimer;
++        }
++
++    #endif /* configSUPPORT_STATIC_ALLOCATION */
++/*-----------------------------------------------------------*/
++
++    BaseType_t xTimerGenericCommand( TimerHandle_t xTimer,
++                                     const BaseType_t xCommandID,
++                                     const TickType_t xOptionalValue,
++                                     BaseType_t * const pxHigherPriorityTaskWoken,
++                                     const TickType_t xTicksToWait )
++    {
++        rt_err_t err = -RT_ERROR;
++
++        configASSERT( xTimer );
++
++        if ( ( xCommandID == tmrCOMMAND_START ) || ( xCommandID == tmrCOMMAND_START_FROM_ISR )
++             || ( xCommandID == tmrCOMMAND_RESET ) || ( xCommandID == tmrCOMMAND_RESET_FROM_ISR ) )
++        {
++            err = rt_timer_start( ( rt_timer_t ) xTimer );
++        }
++        else if ( ( xCommandID == tmrCOMMAND_STOP ) || ( xCommandID == tmrCOMMAND_STOP_FROM_ISR ) )
++        {
++            err = rt_timer_stop( ( rt_timer_t ) xTimer );
++        }
++        else if ( ( xCommandID == tmrCOMMAND_CHANGE_PERIOD ) || ( xCommandID == tmrCOMMAND_CHANGE_PERIOD_FROM_ISR ) )
++        {
++            if ( rt_timer_stop( ( rt_timer_t ) xTimer ) == RT_EOK )
++            {
++                if ( rt_timer_control( ( rt_timer_t ) xTimer, RT_TIMER_CTRL_SET_TIME, ( void * ) &xOptionalValue ) == RT_EOK )
++                {
++                    err = rt_timer_start( ( rt_timer_t ) xTimer );
++                }
++            }
++        }
++        else if ( xCommandID == tmrCOMMAND_DELETE )
++        {
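++            /* Timers from xTimerCreate() had the static object flag cleared
++             * at creation, so rt_object_is_systemobject() distinguishes them
++             * from xTimerCreateStatic() timers here.  A dynamically created
++             * timer is flagged static again so rt_timer_detach() accepts it,
++             * and the wrapper then frees the memory itself. */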
++        #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
++            if ( rt_object_is_systemobject( ( rt_object_t ) xTimer ) )
++        #endif
++            {
++            #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
++                err = rt_timer_detach( ( rt_timer_t ) xTimer );
++            #endif
++        #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
++            }
++            else
++            {
++        #endif
++            #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
++                ( ( rt_timer_t ) xTimer )->parent.type |= RT_Object_Class_Static;
++                err = rt_timer_detach( ( rt_timer_t ) xTimer );
++                RT_KERNEL_FREE( xTimer );
++            #endif
++            }
++        }
++
++        if ( ( xCommandID >= tmrFIRST_FROM_ISR_COMMAND ) && ( xCommandID <= tmrCOMMAND_CHANGE_PERIOD_FROM_ISR ) && ( pxHigherPriorityTaskWoken != NULL ) )
++        {
++            *pxHigherPriorityTaskWoken = pdFALSE;
++        }
++
++        return rt_err_to_freertos( err );
++    }
++/*-----------------------------------------------------------*/
++
++    TaskHandle_t xTimerGetTimerDaemonTaskHandle( void )
++    {
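++        /* RT-Thread's soft-timer thread is named "timer"; its handle stands
++         * in for the FreeRTOS timer service/daemon task. */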
++        return ( TaskHandle_t ) rt_thread_find( "timer" );
++    }
++/*-----------------------------------------------------------*/
++
++    TickType_t xTimerGetPeriod( TimerHandle_t xTimer )
++    {
++        Timer_t * pxTimer = xTimer;
++        rt_tick_t arg;
++
++        configASSERT( xTimer );
++        rt_timer_control( ( rt_timer_t ) pxTimer, RT_TIMER_CTRL_GET_TIME, &arg );
++
++        return ( TickType_t ) arg;
++    }
++/*-----------------------------------------------------------*/
++
++    void vTimerSetReloadMode( TimerHandle_t xTimer,
++                              const UBaseType_t uxAutoReload )
++    {
++        Timer_t * pxTimer = xTimer;
++
++        configASSERT( xTimer );
++        if ( uxAutoReload != pdFALSE )
++        {
++            rt_timer_control( ( rt_timer_t ) pxTimer, RT_TIMER_CTRL_SET_PERIODIC, RT_NULL );
++        }
++        else
++        {
++            rt_timer_control( ( rt_timer_t ) pxTimer, RT_TIMER_CTRL_SET_ONESHOT, RT_NULL );
++        }
++    }
++/*-----------------------------------------------------------*/
++
++    UBaseType_t uxTimerGetReloadMode( TimerHandle_t xTimer )
++    {
++        Timer_t * pxTimer = xTimer;
++        UBaseType_t uxReturn;
++        rt_base_t level;
++
++        configASSERT( xTimer );
++        level = rt_hw_interrupt_disable();
++        if ( ( ( rt_timer_t ) pxTimer )->parent.flag & RT_TIMER_FLAG_PERIODIC )
++        {
++            uxReturn = ( UBaseType_t ) pdTRUE;
++        }
++        else
++        {
++            uxReturn = ( UBaseType_t ) pdFALSE;
++        }
++        rt_hw_interrupt_enable( level );
++
++        return uxReturn;
++    }
++/*-----------------------------------------------------------*/
++
++    TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer )
++    {
++        Timer_t * pxTimer = xTimer;
++        TickType_t xReturn;
++
++        configASSERT( xTimer );
++        rt_timer_control( ( rt_timer_t ) pxTimer, RT_TIMER_CTRL_GET_REMAIN_TIME, &xReturn );
++
++        return xReturn;
++    }
++/*-----------------------------------------------------------*/
++
++    const char * pcTimerGetName( TimerHandle_t xTimer )
++    {
++        Timer_t * pxTimer = xTimer;
++
++        configASSERT( xTimer );
++        return ( ( rt_timer_t ) pxTimer )->parent.name;
++    }
++/*-----------------------------------------------------------*/
++
++    BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer )
++    {
++        BaseType_t xReturn;
++        Timer_t * pxTimer = xTimer;
++        rt_uint32_t arg;
++
++        configASSERT( xTimer );
++
++        rt_timer_control( ( rt_timer_t ) pxTimer, RT_TIMER_CTRL_GET_STATE, &arg );
++        if ( arg == RT_TIMER_FLAG_ACTIVATED )
++        {
++            xReturn = pdTRUE;
++        }
++        else
++        {
++            xReturn = pdFALSE;
++        }
++
++        return xReturn;
++    }
++/*-----------------------------------------------------------*/
++
++    void * pvTimerGetTimerID( const TimerHandle_t xTimer )
++    {
++        Timer_t * const pxTimer = xTimer;
++        void * pvReturn;
++        rt_base_t level;
++
++        configASSERT( xTimer );
++
++        level = rt_hw_interrupt_disable();
++        pvReturn = pxTimer->pvTimerID;
++        rt_hw_interrupt_enable( level );
++
++        return pvReturn;
++    }
++/*-----------------------------------------------------------*/
++
++    void vTimerSetTimerID( TimerHandle_t xTimer,
++                           void * pvNewID )
++    {
++        Timer_t * const pxTimer = xTimer;
++        rt_base_t level;
++
++        configASSERT( xTimer );
++
++        level = rt_hw_interrupt_disable();
++        pxTimer->pvTimerID = pvNewID;
++        rt_hw_interrupt_enable( level );
++    }
++/*-----------------------------------------------------------*/
++
++#endif /* configUSE_TIMERS == 1 */
+diff --git a/components/freertos/RT-Thread-wrapper-of-FreeRTOS/readme.md b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/readme.md
+new file mode 100644
+index 0000000000..6a0fafb4c7
+--- /dev/null
++++ b/components/freertos/RT-Thread-wrapper-of-FreeRTOS/readme.md
+@@ -0,0 +1,3 @@
++# FreeRTOS compatibility layer for the RT-Thread operating system
++## FreeRTOS Application Compatibility Layer (ACL) for RT-Thread
++## Lets applications developed against FreeRTOS migrate to RT-Thread transparently
+-- 
+2.32.0 (Apple Git-132)
+
+
+From 9981521890074b517074bf85a4e0a1f71cef851e Mon Sep 17 00:00:00 2001
+From: tangzz98 <tangz98@outlook.com>
+Date: Wed, 3 Aug 2022 16:17:06 +0800
+Subject: [PATCH 4/4] Update linker script for finsh
+
+---
+ components/esp_system/ld/esp32c3/sections.ld.in | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/components/esp_system/ld/esp32c3/sections.ld.in b/components/esp_system/ld/esp32c3/sections.ld.in
+index 8215237fff..561ae92318 100644
+--- a/components/esp_system/ld/esp32c3/sections.ld.in
++++ b/components/esp_system/ld/esp32c3/sections.ld.in
+@@ -248,6 +248,16 @@ SECTIONS
+     *(.fini.literal)
+     *(.fini)
+     *(.gnu.version)
++
++    /* section information for finsh shell */
++    . = ALIGN(4);
++    __fsymtab_start = .;
++    KEEP(*(FSymTab))
++    __fsymtab_end = .;
++    . = ALIGN(4);
++    __vsymtab_start = .;
++    KEEP(*(VSymTab))
++    __vsymtab_end = .;
+ 
+     /** CPU will try to prefetch up to 16 bytes of
+       * of instructions. This means that any configuration (e.g. MMU, PMS) must allow
+-- 
+2.32.0 (Apple Git-132)
+

+ 2 - 3
bsp/ESP32_C3/sdkconfig

@@ -600,7 +600,7 @@ CONFIG_FREERTOS_CORETIMER_SYSTIMER_LVL1=y
 # CONFIG_FREERTOS_CORETIMER_SYSTIMER_LVL3 is not set
 CONFIG_FREERTOS_SYSTICK_USES_SYSTIMER=y
 CONFIG_FREERTOS_OPTIMIZED_SCHEDULER=y
-CONFIG_FREERTOS_HZ=100
+CONFIG_FREERTOS_HZ=1000
 CONFIG_FREERTOS_ASSERT_ON_UNTESTED_FUNCTION=y
 # CONFIG_FREERTOS_CHECK_STACKOVERFLOW_NONE is not set
 # CONFIG_FREERTOS_CHECK_STACKOVERFLOW_PTRVAL is not set
@@ -628,8 +628,7 @@ CONFIG_FREERTOS_CHECK_MUTEX_GIVEN_BY_OWNER=y
 # CONFIG_FREERTOS_CHECK_PORT_CRITICAL_COMPLIANCE is not set
 # CONFIG_FREERTOS_PLACE_FUNCTIONS_INTO_FLASH is not set
 CONFIG_FREERTOS_DEBUG_OCDAWARE=y
-CONFIG_FREERTOS_ENABLE_TASK_SNAPSHOT=y
-# CONFIG_FREERTOS_PLACE_SNAPSHOT_FUNS_INTO_FLASH is not set
+# CONFIG_FREERTOS_ENABLE_TASK_SNAPSHOT is not set
 # end of FreeRTOS
 
 #