esp_event.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023
  1. /*
  2. * SPDX-FileCopyrightText: 2018-2023 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <stdlib.h>
  7. #include <string.h>
  8. #include <stdio.h>
  9. #include <stdbool.h>
  10. #include "esp_log.h"
  11. #include "esp_event.h"
  12. #include "esp_event_internal.h"
  13. #include "esp_event_private.h"
  14. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  15. #include "esp_timer.h"
  16. #endif
  17. /* ---------------------------- Definitions --------------------------------- */
  18. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
// LOOP @<address, name> rx:<received events no.> dr:<dropped events no.>
  20. #define LOOP_DUMP_FORMAT "LOOP @%p,%s rx:%" PRIu32 " dr:%" PRIu32 "\n"
  21. // handler @<address> ev:<base, id> inv:<times invoked> time:<runtime>
  22. #define HANDLER_DUMP_FORMAT " HANDLER @%p ev:%s,%s inv:%" PRIu32 " time:%lld us\n"
  23. #define PRINT_DUMP_INFO(dst, sz, ...) do { \
  24. int cb = snprintf(dst, sz, __VA_ARGS__); \
  25. dst += cb; \
  26. sz -= cb; \
  27. } while(0);
  28. #endif
  29. /* ------------------------- Static Variables ------------------------------- */
  30. static const char* TAG = "event";
  31. static const char* esp_event_any_base = "any";
  32. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  33. static SLIST_HEAD(esp_event_loop_instance_list_t, esp_event_loop_instance) s_event_loops =
  34. SLIST_HEAD_INITIALIZER(s_event_loops);
  35. static portMUX_TYPE s_event_loops_spinlock = portMUX_INITIALIZER_UNLOCKED;
  36. #endif
  37. /* ------------------------- Static Functions ------------------------------- */
  38. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  39. static int esp_event_dump_prepare(void)
  40. {
  41. esp_event_loop_instance_t* loop_it;
  42. esp_event_loop_node_t *loop_node_it;
  43. esp_event_base_node_t* base_node_it;
  44. esp_event_id_node_t* id_node_it;
  45. esp_event_handler_node_t* handler_it;
  46. // Count the number of items to be printed. This is needed to compute how much memory to reserve.
  47. int loops = 0, handlers = 0;
  48. portENTER_CRITICAL(&s_event_loops_spinlock);
  49. SLIST_FOREACH(loop_it, &s_event_loops, next) {
  50. SLIST_FOREACH(loop_node_it, &(loop_it->loop_nodes), next) {
  51. SLIST_FOREACH(handler_it, &(loop_node_it->handlers), next) {
  52. handlers++;
  53. }
  54. SLIST_FOREACH(base_node_it, &(loop_node_it->base_nodes), next) {
  55. SLIST_FOREACH(handler_it, &(base_node_it->handlers), next) {
  56. handlers++;
  57. }
  58. SLIST_FOREACH(id_node_it, &(base_node_it->id_nodes), next) {
  59. SLIST_FOREACH(handler_it, &(id_node_it->handlers), next) {
  60. handlers++;
  61. }
  62. }
  63. }
  64. }
  65. loops++;
  66. }
  67. portEXIT_CRITICAL(&s_event_loops_spinlock);
  68. // Reserve slightly more memory than computed
  69. int allowance = 3;
  70. int size = (((loops + allowance) * (sizeof(LOOP_DUMP_FORMAT) + 10 + 20 + 2 * 11)) +
  71. ((handlers + allowance) * (sizeof(HANDLER_DUMP_FORMAT) + 10 + 2 * 20 + 11 + 20)));
  72. return size;
  73. }
  74. #endif
  75. static void esp_event_loop_run_task(void* args)
  76. {
  77. esp_err_t err;
  78. esp_event_loop_handle_t event_loop = (esp_event_loop_handle_t) args;
  79. ESP_LOGD(TAG, "running task for loop %p", event_loop);
  80. while(1) {
  81. err = esp_event_loop_run(event_loop, portMAX_DELAY);
  82. if (err != ESP_OK) {
  83. break;
  84. }
  85. }
  86. ESP_LOGE(TAG, "suspended task for loop %p", event_loop);
  87. vTaskSuspend(NULL);
  88. }
/* Invoke a single registered handler for a posted event.
 *
 * loop    - event loop the handler belongs to
 * handler - handler node to invoke
 * post    - the posted event (base, id, optional data), taken by value
 *
 * With CONFIG_ESP_EVENT_LOOP_PROFILING enabled, the handler's runtime is
 * measured and its invoked/time counters updated; the handler pointer is
 * re-validated first because the handler may have unregistered itself.
 */
static void handler_execute(esp_event_loop_instance_t* loop, esp_event_handler_node_t *handler, esp_event_post_instance_t post)
{
    ESP_LOGD(TAG, "running post %s:%"PRIu32" with handler %p and context %p on loop %p", post.base, post.id, handler->handler_ctx->handler, &handler->handler_ctx, loop);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    int64_t start, diff;
    start = esp_timer_get_time();
#endif
    // Execute the handler
#if CONFIG_ESP_EVENT_POST_FROM_ISR
    // With ISR posting enabled, event data is a union: either a heap pointer
    // (data_allocated) or a small value stored inline in data.val.
    void* data_ptr = NULL;

    if (post.data_set) {
        if (post.data_allocated) {
            data_ptr = post.data.ptr;
        } else {
            data_ptr = &post.data.val;
        }
    }

    (*(handler->handler_ctx->handler))(handler->handler_ctx->arg, post.base, post.id, data_ptr);
#else
    (*(handler->handler_ctx->handler))(handler->handler_ctx->arg, post.base, post.id, post.data);
#endif

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    diff = esp_timer_get_time() - start;

    xSemaphoreTake(loop->profiling_mutex, portMAX_DELAY);

    // At this point handler may be already unregistered.
    // This happens in "handler instance can unregister itself" test case.
    // To prevent memory corruption error it's necessary to check if pointer is still valid.
    // NOTE(review): only the loop-level handler lists are scanned here, so
    // handlers registered at base/id level appear to never get their
    // profiling counters updated — confirm whether that is intentional.
    esp_event_loop_node_t* loop_node;
    esp_event_handler_node_t* handler_node;
    SLIST_FOREACH(loop_node, &(loop->loop_nodes), next) {
        SLIST_FOREACH(handler_node, &(loop_node->handlers), next) {
            if(handler_node == handler) {
                handler->invoked++;
                handler->time += diff;
            }
        }
    }

    xSemaphoreGive(loop->profiling_mutex);
#endif
}
  129. static esp_err_t handler_instances_add(esp_event_handler_nodes_t* handlers, esp_event_handler_t event_handler, void* event_handler_arg, esp_event_handler_instance_context_t **handler_ctx, bool legacy)
  130. {
  131. esp_event_handler_node_t *handler_instance = calloc(1, sizeof(*handler_instance));
  132. if (!handler_instance) return ESP_ERR_NO_MEM;
  133. esp_event_handler_instance_context_t *context = calloc(1, sizeof(*context));
  134. if (!context) {
  135. free(handler_instance);
  136. return ESP_ERR_NO_MEM;
  137. }
  138. context->handler = event_handler;
  139. context->arg = event_handler_arg;
  140. handler_instance->handler_ctx = context;
  141. if (SLIST_EMPTY(handlers)) {
  142. SLIST_INSERT_HEAD(handlers, handler_instance, next);
  143. }
  144. else {
  145. esp_event_handler_node_t *it = NULL, *last = NULL;
  146. SLIST_FOREACH(it, handlers, next) {
  147. if (legacy) {
  148. if(event_handler == it->handler_ctx->handler) {
  149. it->handler_ctx->arg = event_handler_arg;
  150. ESP_LOGW(TAG, "handler already registered, overwriting");
  151. free(handler_instance);
  152. free(context);
  153. return ESP_OK;
  154. }
  155. }
  156. last = it;
  157. }
  158. SLIST_INSERT_AFTER(last, handler_instance, next);
  159. }
  160. // If the caller didn't provide the handler instance context, don't set it.
  161. // It will be removed once the event loop is deleted.
  162. if (handler_ctx) {
  163. *handler_ctx = context;
  164. }
  165. return ESP_OK;
  166. }
  167. static esp_err_t base_node_add_handler(esp_event_base_node_t* base_node,
  168. int32_t id,
  169. esp_event_handler_t event_handler,
  170. void *event_handler_arg,
  171. esp_event_handler_instance_context_t **handler_ctx,
  172. bool legacy)
  173. {
  174. if (id == ESP_EVENT_ANY_ID) {
  175. return handler_instances_add(&(base_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
  176. }
  177. else {
  178. esp_err_t err = ESP_OK;
  179. esp_event_id_node_t *it = NULL, *id_node = NULL, *last_id_node = NULL;
  180. SLIST_FOREACH(it, &(base_node->id_nodes), next) {
  181. if (it->id == id) {
  182. id_node = it;
  183. }
  184. last_id_node = it;
  185. }
  186. if (!last_id_node || !id_node) {
  187. id_node = (esp_event_id_node_t*) calloc(1, sizeof(*id_node));
  188. if (!id_node) {
  189. ESP_LOGE(TAG, "alloc for new id node failed");
  190. return ESP_ERR_NO_MEM;
  191. }
  192. id_node->id = id;
  193. SLIST_INIT(&(id_node->handlers));
  194. err = handler_instances_add(&(id_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
  195. if (err == ESP_OK) {
  196. if (!last_id_node) {
  197. SLIST_INSERT_HEAD(&(base_node->id_nodes), id_node, next);
  198. }
  199. else {
  200. SLIST_INSERT_AFTER(last_id_node, id_node, next);
  201. }
  202. } else {
  203. free(id_node);
  204. }
  205. return err;
  206. }
  207. else {
  208. return handler_instances_add(&(id_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
  209. }
  210. }
  211. }
/* Register a handler under a loop node.
 *
 * Loop-level handlers (wildcard base and id) go directly into the loop node's
 * own handler list.  Anything else is attached to a base node: an existing
 * one when possible, otherwise a freshly created node appended at the tail.
 * A new base node is also created when a base-level handler
 * (id == ESP_EVENT_ANY_ID) is registered while id nodes already exist, which
 * keeps dispatch order matching registration order.
 *
 * Returns ESP_OK or ESP_ERR_NO_MEM.
 */
static esp_err_t loop_node_add_handler(esp_event_loop_node_t* loop_node,
                                       esp_event_base_t base,
                                       int32_t id,
                                       esp_event_handler_t event_handler,
                                       void *event_handler_arg,
                                       esp_event_handler_instance_context_t **handler_ctx,
                                       bool legacy)
{
    if (base == esp_event_any_base && id == ESP_EVENT_ANY_ID) {
        return handler_instances_add(&(loop_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
    }
    else {
        esp_err_t err = ESP_OK;

        // Single pass: find the base node for `base` (if any) and the tail of
        // the list, so a new node can be appended in registration order.
        esp_event_base_node_t *it = NULL, *base_node = NULL, *last_base_node = NULL;

        SLIST_FOREACH(it, &(loop_node->base_nodes), next) {
            if (it->base == base) {
                base_node = it;
            }
            last_base_node = it;
        }

        // Create a fresh base node when: the list is empty, the base was not
        // found, or a base-level (ANY_ID) handler is being added while id
        // nodes already exist.
        // NOTE(review): the last two clauses partially overlap; the intent is
        // inferred from the dispatch order in esp_event_loop_run — confirm
        // before simplifying this condition.
        if (!last_base_node ||
                !base_node ||
                (base_node && !SLIST_EMPTY(&(base_node->id_nodes)) && id == ESP_EVENT_ANY_ID) ||
                (last_base_node && last_base_node->base != base && !SLIST_EMPTY(&(last_base_node->id_nodes)) && id == ESP_EVENT_ANY_ID)) {
            base_node = (esp_event_base_node_t*) calloc(1, sizeof(*base_node));

            if (!base_node) {
                ESP_LOGE(TAG, "alloc mem for new base node failed");
                return ESP_ERR_NO_MEM;
            }

            base_node->base = base;

            SLIST_INIT(&(base_node->handlers));
            SLIST_INIT(&(base_node->id_nodes));

            err = base_node_add_handler(base_node, id, event_handler, event_handler_arg, handler_ctx, legacy);

            if (err == ESP_OK) {
                if (!last_base_node) {
                    SLIST_INSERT_HEAD(&(loop_node->base_nodes), base_node, next);
                }
                else {
                    SLIST_INSERT_AFTER(last_base_node, base_node, next);
                }
            } else {
                // Handler attach failed: discard the node we just created.
                free(base_node);
            }

            return err;
        } else {
            return base_node_add_handler(base_node, id, event_handler, event_handler_arg, handler_ctx, legacy);
        }
    }
}
  261. static esp_err_t handler_instances_remove(esp_event_handler_nodes_t* handlers, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
  262. {
  263. esp_event_handler_node_t *it, *temp;
  264. SLIST_FOREACH_SAFE(it, handlers, next, temp) {
  265. if (legacy) {
  266. if (it->handler_ctx->handler == handler_ctx->handler) {
  267. SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
  268. free(it->handler_ctx);
  269. free(it);
  270. return ESP_OK;
  271. }
  272. } else {
  273. if (it->handler_ctx == handler_ctx) {
  274. SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
  275. free(it->handler_ctx);
  276. free(it);
  277. return ESP_OK;
  278. }
  279. }
  280. }
  281. return ESP_ERR_NOT_FOUND;
  282. }
  283. static esp_err_t base_node_remove_handler(esp_event_base_node_t* base_node, int32_t id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
  284. {
  285. if (id == ESP_EVENT_ANY_ID) {
  286. return handler_instances_remove(&(base_node->handlers), handler_ctx, legacy);
  287. }
  288. else {
  289. esp_event_id_node_t *it, *temp;
  290. SLIST_FOREACH_SAFE(it, &(base_node->id_nodes), next, temp) {
  291. if (it->id == id) {
  292. esp_err_t res = handler_instances_remove(&(it->handlers), handler_ctx, legacy);
  293. if (res == ESP_OK) {
  294. if (SLIST_EMPTY(&(it->handlers))) {
  295. SLIST_REMOVE(&(base_node->id_nodes), it, esp_event_id_node, next);
  296. free(it);
  297. return ESP_OK;
  298. }
  299. }
  300. }
  301. }
  302. }
  303. return ESP_ERR_NOT_FOUND;
  304. }
  305. static esp_err_t loop_node_remove_handler(esp_event_loop_node_t* loop_node, esp_event_base_t base, int32_t id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
  306. {
  307. if (base == esp_event_any_base && id == ESP_EVENT_ANY_ID) {
  308. return handler_instances_remove(&(loop_node->handlers), handler_ctx, legacy);
  309. }
  310. else {
  311. esp_event_base_node_t *it, *temp;
  312. SLIST_FOREACH_SAFE(it, &(loop_node->base_nodes), next, temp) {
  313. if (it->base == base) {
  314. esp_err_t res = base_node_remove_handler(it, id, handler_ctx, legacy);
  315. if (res == ESP_OK) {
  316. if (SLIST_EMPTY(&(it->handlers)) && SLIST_EMPTY(&(it->id_nodes))) {
  317. SLIST_REMOVE(&(loop_node->base_nodes), it, esp_event_base_node, next);
  318. free(it);
  319. return ESP_OK;
  320. }
  321. }
  322. }
  323. }
  324. }
  325. return ESP_ERR_NOT_FOUND;
  326. }
  327. static void handler_instances_remove_all(esp_event_handler_nodes_t* handlers)
  328. {
  329. esp_event_handler_node_t *it, *temp;
  330. SLIST_FOREACH_SAFE(it, handlers, next, temp) {
  331. SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
  332. free(it->handler_ctx);
  333. free(it);
  334. }
  335. }
  336. static void base_node_remove_all_handler(esp_event_base_node_t* base_node)
  337. {
  338. handler_instances_remove_all(&(base_node->handlers));
  339. esp_event_id_node_t *it, *temp;
  340. SLIST_FOREACH_SAFE(it, &(base_node->id_nodes), next, temp) {
  341. handler_instances_remove_all(&(it->handlers));
  342. SLIST_REMOVE(&(base_node->id_nodes), it, esp_event_id_node, next);
  343. free(it);
  344. }
  345. }
  346. static void loop_node_remove_all_handler(esp_event_loop_node_t* loop_node)
  347. {
  348. handler_instances_remove_all(&(loop_node->handlers));
  349. esp_event_base_node_t *it, *temp;
  350. SLIST_FOREACH_SAFE(it, &(loop_node->base_nodes), next, temp) {
  351. base_node_remove_all_handler(it);
  352. SLIST_REMOVE(&(loop_node->base_nodes), it, esp_event_base_node, next);
  353. free(it);
  354. }
  355. }
  356. static void inline __attribute__((always_inline)) post_instance_delete(esp_event_post_instance_t* post)
  357. {
  358. #if CONFIG_ESP_EVENT_POST_FROM_ISR
  359. if (post->data_allocated && post->data.ptr) {
  360. free(post->data.ptr);
  361. }
  362. #else
  363. if (post->data) {
  364. free(post->data);
  365. }
  366. #endif
  367. memset(post, 0, sizeof(*post));
  368. }
  369. /* ---------------------------- Public API --------------------------------- */
/* Create a new event loop according to event_loop_args.
 *
 * Allocates the loop instance, its post queue and mutex(es), and — when
 * task_name is set — spawns a dedicated task that runs the loop forever.
 * On success the new handle is stored in *event_loop.
 *
 * Returns ESP_ERR_INVALID_ARG for NULL arguments, ESP_ERR_NO_MEM when an
 * allocation fails, or ESP_FAIL when the task could not be created.
 */
esp_err_t esp_event_loop_create(const esp_event_loop_args_t* event_loop_args, esp_event_loop_handle_t* event_loop)
{
    if (event_loop_args == NULL) {
        ESP_LOGE(TAG, "event_loop_args was NULL");
        return ESP_ERR_INVALID_ARG;
    }

    if (event_loop == NULL) {
        ESP_LOGE(TAG, "event_loop was NULL");
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop;
    esp_err_t err = ESP_ERR_NO_MEM; // most likely error

    loop = calloc(1, sizeof(*loop));
    if (loop == NULL) {
        ESP_LOGE(TAG, "alloc for event loop failed");
        return err;
    }

    // The queue carries posted events by value.
    loop->queue = xQueueCreate(event_loop_args->queue_size , sizeof(esp_event_post_instance_t));
    if (loop->queue == NULL) {
        ESP_LOGE(TAG, "create event loop queue failed");
        goto on_err;
    }

    // Recursive mutex: run/register/unregister may nest on the same task.
    loop->mutex = xSemaphoreCreateRecursiveMutex();
    if (loop->mutex == NULL) {
        ESP_LOGE(TAG, "create event loop mutex failed");
        goto on_err;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    loop->profiling_mutex = xSemaphoreCreateMutex();
    if (loop->profiling_mutex == NULL) {
        ESP_LOGE(TAG, "create event loop profiling mutex failed");
        goto on_err;
    }
#endif

    SLIST_INIT(&(loop->loop_nodes));

    // Create the loop task if requested
    if (event_loop_args->task_name != NULL) {
        BaseType_t task_created = xTaskCreatePinnedToCore(esp_event_loop_run_task, event_loop_args->task_name,
                            event_loop_args->task_stack_size, (void*) loop,
                            event_loop_args->task_priority, &(loop->task), event_loop_args->task_core_id);

        if (task_created != pdPASS) {
            ESP_LOGE(TAG, "create task for loop failed");
            err = ESP_FAIL;
            goto on_err;
        }

        // NOTE(review): the task_name pointer is stored, not copied — the
        // caller must keep it valid for the loop's lifetime.
        loop->name = event_loop_args->task_name;

        ESP_LOGD(TAG, "created task for loop %p", loop);
    } else {
        loop->name = "";
        loop->task = NULL;
    }

    loop->running_task = NULL;

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    // Track the loop globally so the dump facility can find it.
    portENTER_CRITICAL(&s_event_loops_spinlock);
    SLIST_INSERT_HEAD(&s_event_loops, loop, next);
    portEXIT_CRITICAL(&s_event_loops_spinlock);
#endif

    *event_loop = (esp_event_loop_handle_t) loop;

    ESP_LOGD(TAG, "created event loop %p", loop);

    return ESP_OK;

on_err:
    // Unified cleanup: release whatever was created before the failure.
    if (loop->queue != NULL) {
        vQueueDelete(loop->queue);
    }

    if (loop->mutex != NULL) {
        vSemaphoreDelete(loop->mutex);
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    if (loop->profiling_mutex != NULL) {
        vSemaphoreDelete(loop->profiling_mutex);
    }
#endif

    free(loop);

    return err;
}
// On event lookup performance: The library implements the event list as a linked list, which results in O(n)
// lookup time. The test comparing this implementation to the O(lg n) performance of rbtrees
// (https://github.com/freebsd/freebsd/blob/master/sys/sys/tree.h)
// indicates that the difference is not that substantial, especially considering the additional
// pointers per node of rbtrees. Code for the rbtree implementation of the event loop library is archived
// in feature/esp_event_loop_library_rbtrees if needed.
/* Dispatch events queued on the loop for at most ticks_to_run ticks.
 *
 * For every event received, all matching handlers are invoked while holding
 * the loop mutex: loop-level handlers first, then base-level handlers, then
 * id-level handlers of the matching base.
 *
 * NOTE(review): each xQueueReceive waits up to the full ticks_to_run rather
 * than the remaining budget, so the call can block longer than requested —
 * confirm whether that is acceptable for callers.
 */
esp_err_t esp_event_loop_run(esp_event_loop_handle_t event_loop, TickType_t ticks_to_run)
{
    assert(event_loop);

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
    esp_event_post_instance_t post;
    TickType_t marker = xTaskGetTickCount();
    TickType_t end = 0;

#if (configUSE_16_BIT_TICKS == 1)
    // Signed, wider-than-tick type so the remaining budget can go negative.
    int32_t remaining_ticks = ticks_to_run;
#else
    int64_t remaining_ticks = ticks_to_run;
#endif

    while(xQueueReceive(loop->queue, &post, ticks_to_run) == pdTRUE) {
        // The event has already been unqueued, so ensure it gets executed.
        xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

        // Record which task is dispatching so esp_event_post_to can detect
        // posts made from within a handler of this loop.
        loop->running_task = xTaskGetCurrentTaskHandle();

        bool exec = false;

        esp_event_handler_node_t *handler, *temp_handler;
        esp_event_loop_node_t *loop_node, *temp_node;
        esp_event_base_node_t *base_node, *temp_base;
        esp_event_id_node_t *id_node, *temp_id_node;

        SLIST_FOREACH_SAFE(loop_node, &(loop->loop_nodes), next, temp_node) {
            // Execute loop level handlers
            SLIST_FOREACH_SAFE(handler, &(loop_node->handlers), next, temp_handler) {
                handler_execute(loop, handler, post);
                exec |= true;
            }

            SLIST_FOREACH_SAFE(base_node, &(loop_node->base_nodes), next, temp_base) {
                if (base_node->base == post.base) {
                    // Execute base level handlers
                    SLIST_FOREACH_SAFE(handler, &(base_node->handlers), next, temp_handler) {
                        handler_execute(loop, handler, post);
                        exec |= true;
                    }

                    SLIST_FOREACH_SAFE(id_node, &(base_node->id_nodes), next, temp_id_node) {
                        if (id_node->id == post.id) {
                            // Execute id level handlers
                            SLIST_FOREACH_SAFE(handler, &(id_node->handlers), next, temp_handler) {
                                handler_execute(loop, handler, post);
                                exec |= true;
                            }
                            // Skip to next base node
                            break;
                        }
                    }
                }
            }
        }

        // Keep base/id for the log below; the post (and any heap-copied data)
        // is released before potentially blocking again.
        esp_event_base_t base = post.base;
        int32_t id = post.id;

        post_instance_delete(&post);

        if (ticks_to_run != portMAX_DELAY) {
            end = xTaskGetTickCount();
            remaining_ticks -= end - marker;
            // If the ticks to run expired, return to the caller
            if (remaining_ticks <= 0) {
                // NOTE(review): running_task is not reset to NULL on this
                // early-exit path, unlike the normal path below — confirm
                // whether that is intentional.
                xSemaphoreGiveRecursive(loop->mutex);
                break;
            } else {
                marker = end;
            }
        }

        loop->running_task = NULL;

        xSemaphoreGiveRecursive(loop->mutex);

        if (!exec) {
            // No handlers were registered, not even loop/base level handlers
            ESP_LOGD(TAG, "no handlers have been registered for event %s:%"PRIu32" posted to loop %p", base, id, event_loop);
        }
    }

    return ESP_OK;
}
/* Delete an event loop and release everything it owns.
 *
 * Takes the loop mutex (and profiling mutex, when enabled) before tearing the
 * loop down, deletes the dedicated task if one was created, removes all
 * registered handlers, drains and frees pending posts, and finally frees the
 * loop itself.  The mutex handles are copied to locals up front because the
 * loop structure is freed before the mutexes are released and deleted.
 *
 * Always returns ESP_OK.
 */
esp_err_t esp_event_loop_delete(esp_event_loop_handle_t event_loop)
{
    assert(event_loop);

    ESP_LOGD(TAG, "deleting loop %p", (void*) event_loop);

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
    // Keep local copies: `loop` is freed below, before these are released.
    SemaphoreHandle_t loop_mutex = loop->mutex;
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    SemaphoreHandle_t loop_profiling_mutex = loop->profiling_mutex;
#endif

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    xSemaphoreTake(loop->profiling_mutex, portMAX_DELAY);
    // Remove the loop from the global list used by the dump facility.
    portENTER_CRITICAL(&s_event_loops_spinlock);
    SLIST_REMOVE(&s_event_loops, loop, esp_event_loop_instance, next);
    portEXIT_CRITICAL(&s_event_loops_spinlock);
#endif

    // Delete the task if it was created
    if (loop->task != NULL) {
        vTaskDelete(loop->task);
    }

    // Remove all registered events and handlers in the loop
    esp_event_loop_node_t *it, *temp;
    SLIST_FOREACH_SAFE(it, &(loop->loop_nodes), next, temp) {
        loop_node_remove_all_handler(it);
        SLIST_REMOVE(&(loop->loop_nodes), it, esp_event_loop_node, next);
        free(it);
    }

    // Drop existing posts on the queue
    esp_event_post_instance_t post;
    while(xQueueReceive(loop->queue, &post, 0) == pdTRUE) {
        post_instance_delete(&post);
    }

    // Cleanup loop
    vQueueDelete(loop->queue);
    free(loop);

    // Free loop mutex before deleting
    xSemaphoreGiveRecursive(loop_mutex);
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    xSemaphoreGive(loop_profiling_mutex);
    vSemaphoreDelete(loop_profiling_mutex);
#endif
    vSemaphoreDelete(loop_mutex);

    return ESP_OK;
}
/* Shared implementation for the handler registration APIs.
 *
 * Registering to ESP_EVENT_ANY_BASE with a specific id is rejected; the
 * wildcard base is otherwise mapped onto the internal esp_event_any_base
 * sentinel.  Handlers are added to the last loop node; a fresh loop node is
 * created when none exists yet, or when a loop-level handler is registered
 * after base/id-level ones (preserving registration order during dispatch).
 * In legacy mode duplicates overwrite the stored argument; in instance mode
 * *handler_ctx_arg receives the new instance context.
 *
 * Returns ESP_OK, ESP_ERR_INVALID_ARG, or ESP_ERR_NO_MEM.
 */
esp_err_t esp_event_handler_register_with_internal(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                                   int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg,
                                                   esp_event_handler_instance_context_t** handler_ctx_arg, bool legacy)
{
    assert(event_loop);
    assert(event_handler);

    if (event_base == ESP_EVENT_ANY_BASE && event_id != ESP_EVENT_ANY_ID) {
        ESP_LOGE(TAG, "registering to any event base with specific id unsupported");
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    if (event_base == ESP_EVENT_ANY_BASE) {
        event_base = esp_event_any_base;
    }

    esp_err_t err = ESP_OK;

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

    // Find the tail of the loop node list; registrations go to the tail.
    esp_event_loop_node_t *loop_node = NULL, *last_loop_node = NULL;

    SLIST_FOREACH(loop_node, &(loop->loop_nodes), next) {
        last_loop_node = loop_node;
    }

    bool is_loop_level_handler = (event_base == esp_event_any_base) && (event_id == ESP_EVENT_ANY_ID);

    // A new loop node is needed when there is none, or when a loop-level
    // handler arrives after base nodes already exist on the tail node (so
    // dispatch order still follows registration order).
    if (!last_loop_node ||
            (last_loop_node && !SLIST_EMPTY(&(last_loop_node->base_nodes)) && is_loop_level_handler)) {
        loop_node = (esp_event_loop_node_t*) calloc(1, sizeof(*loop_node));

        if (!loop_node) {
            ESP_LOGE(TAG, "alloc for new loop node failed");
            err = ESP_ERR_NO_MEM;
            goto on_err;
        }

        SLIST_INIT(&(loop_node->handlers));
        SLIST_INIT(&(loop_node->base_nodes));

        err = loop_node_add_handler(loop_node, event_base, event_id, event_handler, event_handler_arg, handler_ctx_arg, legacy);

        if (err == ESP_OK) {
            if (!last_loop_node) {
                SLIST_INSERT_HEAD(&(loop->loop_nodes), loop_node, next);
            }
            else {
                SLIST_INSERT_AFTER(last_loop_node, loop_node, next);
            }
        } else {
            // Handler attach failed: discard the node we just created.
            free(loop_node);
        }
    }
    else {
        err = loop_node_add_handler(last_loop_node, event_base, event_id, event_handler, event_handler_arg, handler_ctx_arg, legacy);
    }

on_err:
    xSemaphoreGiveRecursive(loop->mutex);
    return err;
}
  616. esp_err_t esp_event_handler_register_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
  617. int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg)
  618. {
  619. return esp_event_handler_register_with_internal(event_loop, event_base, event_id, event_handler, event_handler_arg, NULL, true);
  620. }
  621. esp_err_t esp_event_handler_instance_register_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
  622. int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg,
  623. esp_event_handler_instance_t* handler_ctx_arg)
  624. {
  625. return esp_event_handler_register_with_internal(event_loop, event_base, event_id, event_handler, event_handler_arg, (esp_event_handler_instance_context_t**) handler_ctx_arg, false);
  626. }
  627. esp_err_t esp_event_handler_unregister_with_internal(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
  628. int32_t event_id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
  629. {
  630. assert(event_loop);
  631. assert(handler_ctx);
  632. if (event_base == ESP_EVENT_ANY_BASE && event_id != ESP_EVENT_ANY_ID) {
  633. ESP_LOGE(TAG, "unregistering to any event base with specific id unsupported");
  634. return ESP_FAIL;
  635. }
  636. if (event_base == ESP_EVENT_ANY_BASE) {
  637. event_base = esp_event_any_base;
  638. }
  639. esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
  640. xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);
  641. esp_event_loop_node_t *it, *temp;
  642. SLIST_FOREACH_SAFE(it, &(loop->loop_nodes), next, temp) {
  643. esp_err_t res = loop_node_remove_handler(it, event_base, event_id, handler_ctx, legacy);
  644. if (res == ESP_OK && SLIST_EMPTY(&(it->base_nodes)) && SLIST_EMPTY(&(it->handlers))) {
  645. SLIST_REMOVE(&(loop->loop_nodes), it, esp_event_loop_node, next);
  646. free(it);
  647. break;
  648. }
  649. }
  650. xSemaphoreGiveRecursive(loop->mutex);
  651. return ESP_OK;
  652. }
  653. esp_err_t esp_event_handler_unregister_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
  654. int32_t event_id, esp_event_handler_t event_handler)
  655. {
  656. esp_event_handler_instance_context_t local_handler_ctx;
  657. local_handler_ctx.handler = event_handler;
  658. local_handler_ctx.arg = NULL;
  659. return esp_event_handler_unregister_with_internal(event_loop, event_base, event_id, &local_handler_ctx, true);
  660. }
  661. esp_err_t esp_event_handler_instance_unregister_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
  662. int32_t event_id, esp_event_handler_instance_t handler_ctx_arg)
  663. {
  664. if (!handler_ctx_arg) return ESP_ERR_INVALID_ARG;
  665. return esp_event_handler_unregister_with_internal(event_loop, event_base, event_id, (esp_event_handler_instance_context_t*) handler_ctx_arg, false);
  666. }
/* Post an event to the given loop, waiting up to ticks_to_wait for queue
 * space.
 *
 * If event_data is provided, a heap copy of event_data_size bytes is made so
 * the data outlives the caller's buffer; the copy is freed after dispatch (or
 * here on failure).  When the calling task is the one currently running the
 * loop, the queue send uses a zero timeout so a handler posting to its own
 * full queue cannot deadlock.
 *
 * Returns ESP_OK, ESP_ERR_INVALID_ARG (wildcard base/id), ESP_ERR_NO_MEM, or
 * ESP_ERR_TIMEOUT when the queue stayed full within the timeout.
 */
esp_err_t esp_event_post_to(esp_event_loop_handle_t event_loop, esp_event_base_t event_base, int32_t event_id,
                            const void* event_data, size_t event_data_size, TickType_t ticks_to_wait)
{
    assert(event_loop);

    if (event_base == ESP_EVENT_ANY_BASE || event_id == ESP_EVENT_ANY_ID) {
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    esp_event_post_instance_t post;
    memset((void*)(&post), 0, sizeof(post));

    if (event_data != NULL && event_data_size != 0) {
        // Make persistent copy of event data on heap.
        void* event_data_copy = calloc(1, event_data_size);

        if (event_data_copy == NULL) {
            return ESP_ERR_NO_MEM;
        }

        memcpy(event_data_copy, event_data, event_data_size);
#if CONFIG_ESP_EVENT_POST_FROM_ISR
        post.data.ptr = event_data_copy;
        post.data_allocated = true;
        post.data_set = true;
#else
        post.data = event_data_copy;
#endif
    }
    post.base = event_base;
    post.id = event_id;

    BaseType_t result = pdFALSE;

    // Find the task that currently executes the loop. It is safe to query loop->task since it is
    // not mutated since loop creation. ENSURE THIS REMAINS TRUE.
    if (loop->task == NULL) {
        // The loop has no dedicated task. Find out what task is currently running it.
        result = xSemaphoreTakeRecursive(loop->mutex, ticks_to_wait);

        if (result == pdTRUE) {
            if (loop->running_task != xTaskGetCurrentTaskHandle()) {
                // Posting from a different task: safe to block on the queue.
                xSemaphoreGiveRecursive(loop->mutex);
                result = xQueueSendToBack(loop->queue, &post, ticks_to_wait);
            } else {
                // Posting from within a handler of this loop: don't block,
                // the queue cannot drain while dispatch is in progress.
                xSemaphoreGiveRecursive(loop->mutex);
                result = xQueueSendToBack(loop->queue, &post, 0);
            }
        }
    } else {
        // The loop has a dedicated task.
        if (loop->task != xTaskGetCurrentTaskHandle()) {
            result = xQueueSendToBack(loop->queue, &post, ticks_to_wait);
        } else {
            // Same rationale: posting from the loop's own task must not block.
            result = xQueueSendToBack(loop->queue, &post, 0);
        }
    }

    if (result != pdTRUE) {
        // Queue full (or mutex timeout): free the data copy and count a drop.
        post_instance_delete(&post);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
        atomic_fetch_add(&loop->events_dropped, 1);
#endif
        return ESP_ERR_TIMEOUT;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    atomic_fetch_add(&loop->events_recieved, 1);
#endif

    return ESP_OK;
}
  729. #if CONFIG_ESP_EVENT_POST_FROM_ISR
  730. esp_err_t esp_event_isr_post_to(esp_event_loop_handle_t event_loop, esp_event_base_t event_base, int32_t event_id,
  731. const void* event_data, size_t event_data_size, BaseType_t* task_unblocked)
  732. {
  733. assert(event_loop);
  734. if (event_base == ESP_EVENT_ANY_BASE || event_id == ESP_EVENT_ANY_ID) {
  735. return ESP_ERR_INVALID_ARG;
  736. }
  737. esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
  738. esp_event_post_instance_t post;
  739. memset((void*)(&post), 0, sizeof(post));
  740. if (event_data_size > sizeof(post.data.val)) {
  741. return ESP_ERR_INVALID_ARG;
  742. }
  743. if (event_data != NULL && event_data_size != 0) {
  744. memcpy((void*)(&(post.data.val)), event_data, event_data_size);
  745. post.data_allocated = false;
  746. post.data_set = true;
  747. }
  748. post.base = event_base;
  749. post.id = event_id;
  750. BaseType_t result = pdFALSE;
  751. // Post the event from an ISR,
  752. result = xQueueSendToBackFromISR(loop->queue, &post, task_unblocked);
  753. if (result != pdTRUE) {
  754. post_instance_delete(&post);
  755. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  756. atomic_fetch_add(&loop->events_dropped, 1);
  757. #endif
  758. return ESP_FAIL;
  759. }
  760. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  761. atomic_fetch_add(&loop->events_recieved, 1);
  762. #endif
  763. return ESP_OK;
  764. }
  765. #endif
  766. esp_err_t esp_event_dump(FILE* file)
  767. {
  768. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  769. assert(file);
  770. esp_event_loop_instance_t* loop_it;
  771. esp_event_loop_node_t *loop_node_it;
  772. esp_event_base_node_t* base_node_it;
  773. esp_event_id_node_t* id_node_it;
  774. esp_event_handler_node_t* handler_it;
  775. // Allocate memory for printing
  776. int sz = esp_event_dump_prepare();
  777. char* buf = calloc(sz, sizeof(char));
  778. char* dst = buf;
  779. char id_str_buf[20];
  780. // Print info to buffer
  781. portENTER_CRITICAL(&s_event_loops_spinlock);
  782. SLIST_FOREACH(loop_it, &s_event_loops, next) {
  783. uint32_t events_recieved, events_dropped;
  784. events_recieved = atomic_load(&loop_it->events_recieved);
  785. events_dropped = atomic_load(&loop_it->events_dropped);
  786. PRINT_DUMP_INFO(dst, sz, LOOP_DUMP_FORMAT, loop_it, loop_it->task != NULL ? loop_it->name : "none" ,
  787. events_recieved, events_dropped);
  788. int sz_bak = sz;
  789. SLIST_FOREACH(loop_node_it, &(loop_it->loop_nodes), next) {
  790. SLIST_FOREACH(handler_it, &(loop_node_it->handlers), next) {
  791. PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, "ESP_EVENT_ANY_BASE",
  792. "ESP_EVENT_ANY_ID", handler_it->invoked, handler_it->time);
  793. }
  794. SLIST_FOREACH(base_node_it, &(loop_node_it->base_nodes), next) {
  795. SLIST_FOREACH(handler_it, &(base_node_it->handlers), next) {
  796. PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, base_node_it->base ,
  797. "ESP_EVENT_ANY_ID", handler_it->invoked, handler_it->time);
  798. }
  799. SLIST_FOREACH(id_node_it, &(base_node_it->id_nodes), next) {
  800. SLIST_FOREACH(handler_it, &(id_node_it->handlers), next) {
  801. memset(id_str_buf, 0, sizeof(id_str_buf));
  802. snprintf(id_str_buf, sizeof(id_str_buf), "%" PRIi32, id_node_it->id);
  803. PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, base_node_it->base ,
  804. id_str_buf, handler_it->invoked, handler_it->time);
  805. }
  806. }
  807. }
  808. }
  809. // No handlers registered for this loop
  810. if (sz == sz_bak) {
  811. PRINT_DUMP_INFO(dst, sz, " NO HANDLERS REGISTERED\n");
  812. }
  813. }
  814. portEXIT_CRITICAL(&s_event_loops_spinlock);
  815. // Print the contents of the buffer to the file
  816. fprintf(file, buf);
  817. // Free the allocated buffer
  818. free(buf);
  819. #endif
  820. return ESP_OK;
  821. }