@@ -2,9 +2,8 @@
  * Test backported deletion behavior by creating tasks of various affinities and
- * check if the task memory is freed immediately under the correct conditions.
+ * checking that the task memory is freed immediately under the correct conditions.
  *
- * The behavior of vTaskDelete() has been backported form FreeRTOS v9.0.0. This
- * results in the immediate freeing of task memory and the immediate execution
- * of deletion callbacks under the following conditions...
+ * The behavior of vTaskDelete() results in the immediate freeing of task memory
+ * and the immediate execution of deletion callbacks under the following conditions...
  * - When deleting a task that is not currently running on either core
  * - When deleting a task that is pinned to the same core (with respect to
- *   the core that calls vTaskDelete()
+ *   the core that calls vTaskDelete())
@@ -16,6 +15,7 @@
 
 #include "freertos/FreeRTOS.h"
 #include "freertos/task.h"
+#include "freertos/semphr.h"
 #include "esp_heap_caps.h"
 
 #include "unity.h"
@@ -84,3 +84,79 @@ TEST_CASE("FreeRTOS Delete Tasks", "[freertos]")
     }
 
 }
+
+
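+// Per-task parameters shared between the test case and each blocking task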
+typedef struct {
+    SemaphoreHandle_t sem;
+    volatile bool deleted; // Check the deleted task doesn't keep running after being deleted
+} tsk_blocks_param_t;
+
+/* Task blocks as often as possible
+   (two or more of these can share the same semaphore and "juggle" it around)
+*/
+static void tsk_blocks_frequently(void *param)
+{
+    tsk_blocks_param_t *p = (tsk_blocks_param_t *)param;
+    SemaphoreHandle_t sem = p->sem;
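+    // Seed rand() from the tick count and task handle so delay patterns vary per task and per run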
+    srand(xTaskGetTickCount() ^ (int)xTaskGetCurrentTaskHandle());
+    while (1) {
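+        // Alternate short busy-waits with blocking on the shared mutex; the asserts
+        // verify this task is never scheduled again once it has been deleted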
+        assert(!p->deleted);
+        esp_rom_delay_us(rand() % 10);
+        assert(!p->deleted);
+        xSemaphoreTake(sem, portMAX_DELAY);
+        assert(!p->deleted);
+        esp_rom_delay_us(rand() % 10);
+        assert(!p->deleted);
+        xSemaphoreGive(sem);
+    }
+}
+
+TEST_CASE("FreeRTOS Delete Blocked Tasks", "[freertos]")
+{
+    TaskHandle_t blocking_tasks[portNUM_PROCESSORS + 1]; // one per CPU, plus one unpinned task
+    tsk_blocks_param_t params[portNUM_PROCESSORS + 1] = { 0 };
+
+    unsigned before = heap_caps_get_free_size(MALLOC_CAP_8BIT);
+    printf("Free memory at start %u\n", before);
+
+    /* Any bugs will depend on relative timing of destroying the tasks, so create & delete many times.
+
+       Stop early if it looks like some resources have not been properly cleaned up.
+
+       (1000 iterations takes about 9 seconds on ESP32 dual core)
+    */
+    for (unsigned iter = 0; iter < 1000; iter++) {
+        // Create everything
+        SemaphoreHandle_t sem = xSemaphoreCreateMutex();
+        for (unsigned i = 0; i < portNUM_PROCESSORS + 1; i++) {
+            params[i].deleted = false;
+            params[i].sem = sem;
+
+            TEST_ASSERT_EQUAL(pdTRUE,
+                              xTaskCreatePinnedToCore(tsk_blocks_frequently, "tsk_block", 4096, &params[i],
+                                                      UNITY_FREERTOS_PRIORITY - 1, &blocking_tasks[i],
+                                                      i < portNUM_PROCESSORS ? i : tskNO_AFFINITY));
+        }
+
+        vTaskDelay(5); // Let the tasks juggle the mutex for a bit
+
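+        // Delete the tasks regardless of state (blocked, busy-waiting, or running on the
+        // other core); 'deleted' is set afterwards so any further execution trips an assert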
+        for (unsigned i = 0; i < portNUM_PROCESSORS + 1; i++) {
+            vTaskDelete(blocking_tasks[i]);
+            params[i].deleted = true;
+        }
+        vTaskDelay(4); // Yield so the idle task can free the memory of tasks deleted from the other core
+
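+        // Deleting the mutex is safe now: every task that could be blocked on it is gone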
+        vSemaphoreDelete(sem);
+
+        // Check we haven't leaked resources yet; allow up to 256 bytes of slack for
+        // heap bookkeeping and allocations freed lazily by the idle task
+        TEST_ASSERT_GREATER_OR_EQUAL(before - 256, heap_caps_get_free_size(MALLOC_CAP_8BIT));
+    }
+}