// esp_event.c

// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdbool.h>

#include "esp_log.h"
#include "esp_event.h"
#include "esp_event_internal.h"
#include "esp_event_private.h"

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
#include "esp_timer.h"
#endif

/* ---------------------------- Definitions --------------------------------- */

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
// LOOP @<address, name> rx:<received events no.> dr:<dropped events no.>
#define LOOP_DUMP_FORMAT "LOOP @%p,%s rx:%u dr:%u\n"
// handler @<address> ev:<base, id> inv:<times invoked> time:<runtime>
#define HANDLER_DUMP_FORMAT " HANDLER @%p ev:%s,%s inv:%u time:%lld us\n"

#define PRINT_DUMP_INFO(dst, sz, ...) do { \
        int cb = snprintf(dst, sz, __VA_ARGS__); \
        dst += cb; \
        sz -= cb; \
    } while(0);
#endif
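
// For reference, illustrative dump lines built from the formats above (the
// address, name, event and counter values are placeholders, not real output):
//
//   LOOP @0x3ffb6b2c,my_loop_task rx:12 dr:0
//    HANDLER @0x400d1234 ev:MY_EVENT_BASE,1 inv:5 time:103 us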

/* ------------------------- Static Variables ------------------------------- */

static const char* TAG = "event";
static const char* esp_event_any_base = "any";

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
static SLIST_HEAD(esp_event_loop_instance_list_t, esp_event_loop_instance) s_event_loops =
        SLIST_HEAD_INITIALIZER(s_event_loops);

static portMUX_TYPE s_event_loops_spinlock = portMUX_INITIALIZER_UNLOCKED;
#endif

/* ------------------------- Static Functions ------------------------------- */

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
static int esp_event_dump_prepare(void)
{
    esp_event_loop_instance_t* loop_it;
    esp_event_loop_node_t* loop_node_it;
    esp_event_base_node_t* base_node_it;
    esp_event_id_node_t* id_node_it;
    esp_event_handler_instance_t* handler_it;

    // Count the number of items to be printed. This is needed to compute how much memory to reserve.
    int loops = 0, handlers = 0;

    portENTER_CRITICAL(&s_event_loops_spinlock);

    SLIST_FOREACH(loop_it, &s_event_loops, next) {
        SLIST_FOREACH(loop_node_it, &(loop_it->loop_nodes), next) {
            SLIST_FOREACH(handler_it, &(loop_node_it->handlers), next) {
                handlers++;
            }

            SLIST_FOREACH(base_node_it, &(loop_node_it->base_nodes), next) {
                SLIST_FOREACH(handler_it, &(base_node_it->handlers), next) {
                    handlers++;
                }

                SLIST_FOREACH(id_node_it, &(base_node_it->id_nodes), next) {
                    SLIST_FOREACH(handler_it, &(id_node_it->handlers), next) {
                        handlers++;
                    }
                }
            }
        }
        loops++;
    }

    portEXIT_CRITICAL(&s_event_loops_spinlock);

    // Reserve slightly more memory than computed
    int allowance = 3;
    int size = (((loops + allowance) * (sizeof(LOOP_DUMP_FORMAT) + 10 + 20 + 2 * 11)) +
                ((handlers + allowance) * (sizeof(HANDLER_DUMP_FORMAT) + 10 + 2 * 20 + 11 + 20)));

    return size;
}
#endif

static void esp_event_loop_run_task(void* args)
{
    esp_err_t err;
    esp_event_loop_handle_t event_loop = (esp_event_loop_handle_t) args;

    ESP_LOGD(TAG, "running task for loop %p", event_loop);

    while (1) {
        err = esp_event_loop_run(event_loop, portMAX_DELAY);
        if (err != ESP_OK) {
            break;
        }
    }

    ESP_LOGE(TAG, "suspended task for loop %p", event_loop);
    vTaskSuspend(NULL);
}

static void handler_execute(esp_event_loop_instance_t* loop, esp_event_handler_instance_t* handler, esp_event_post_instance_t post)
{
    ESP_LOGD(TAG, "running post %s:%d with handler %p on loop %p", post.base, post.id, handler->handler, loop);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    int64_t start, diff;
    start = esp_timer_get_time();
#endif
    // Execute the handler
#if CONFIG_ESP_EVENT_POST_FROM_ISR
    void* data_ptr = NULL;

    if (post.data_set) {
        if (post.data_allocated) {
            data_ptr = post.data.ptr;
        } else {
            data_ptr = &post.data.val;
        }
    }

    (*(handler->handler))(handler->arg, post.base, post.id, data_ptr);
#else
    (*(handler->handler))(handler->arg, post.base, post.id, post.data);
#endif

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    diff = esp_timer_get_time() - start;

    xSemaphoreTake(loop->profiling_mutex, portMAX_DELAY);
    handler->invoked++;
    handler->time += diff;
    xSemaphoreGive(loop->profiling_mutex);
#endif
}

static esp_err_t handler_instances_add(esp_event_handler_instances_t* handlers, esp_event_handler_t handler, void* handler_arg)
{
    esp_event_handler_instance_t* handler_instance = calloc(1, sizeof(*handler_instance));

    if (!handler_instance) {
        return ESP_ERR_NO_MEM;
    }

    handler_instance->handler = handler;
    handler_instance->arg = handler_arg;

    if (SLIST_EMPTY(handlers)) {
        SLIST_INSERT_HEAD(handlers, handler_instance, next);
    } else {
        esp_event_handler_instance_t *it = NULL, *last = NULL;

        SLIST_FOREACH(it, handlers, next) {
            if (handler == it->handler) {
                it->arg = handler_arg;
                ESP_LOGW(TAG, "handler already registered, overwriting");
                free(handler_instance);
                return ESP_OK;
            }
            last = it;
        }

        SLIST_INSERT_AFTER(last, handler_instance, next);
    }

    return ESP_OK;
}

static esp_err_t base_node_add_handler(esp_event_base_node_t* base_node, int32_t id, esp_event_handler_t handler, void* handler_arg)
{
    if (id == ESP_EVENT_ANY_ID) {
        return handler_instances_add(&(base_node->handlers), handler, handler_arg);
    } else {
        esp_err_t err = ESP_OK;
        esp_event_id_node_t *it = NULL, *id_node = NULL, *last_id_node = NULL;

        SLIST_FOREACH(it, &(base_node->id_nodes), next) {
            if (it->id == id) {
                id_node = it;
            }
            last_id_node = it;
        }

        if (!last_id_node || !id_node) {
            id_node = (esp_event_id_node_t*) calloc(1, sizeof(*id_node));

            if (!id_node) {
                ESP_LOGE(TAG, "alloc for new id node failed");
                return ESP_ERR_NO_MEM;
            }

            id_node->id = id;

            SLIST_INIT(&(id_node->handlers));

            err = handler_instances_add(&(id_node->handlers), handler, handler_arg);

            if (err == ESP_OK) {
                if (!last_id_node) {
                    SLIST_INSERT_HEAD(&(base_node->id_nodes), id_node, next);
                } else {
                    SLIST_INSERT_AFTER(last_id_node, id_node, next);
                }
            } else {
                free(id_node);
            }

            return err;
        } else {
            return handler_instances_add(&(id_node->handlers), handler, handler_arg);
        }
    }
}

static esp_err_t loop_node_add_handler(esp_event_loop_node_t* loop_node, esp_event_base_t base, int32_t id, esp_event_handler_t handler, void* handler_arg)
{
    if (base == esp_event_any_base && id == ESP_EVENT_ANY_ID) {
        return handler_instances_add(&(loop_node->handlers), handler, handler_arg);
    } else {
        esp_err_t err = ESP_OK;
        esp_event_base_node_t *it = NULL, *base_node = NULL, *last_base_node = NULL;

        SLIST_FOREACH(it, &(loop_node->base_nodes), next) {
            if (it->base == base) {
                base_node = it;
            }
            last_base_node = it;
        }

        if (!last_base_node ||
            !base_node ||
            (base_node && !SLIST_EMPTY(&(base_node->id_nodes)) && id == ESP_EVENT_ANY_ID) ||
            (last_base_node && last_base_node->base != base && !SLIST_EMPTY(&(last_base_node->id_nodes)) && id == ESP_EVENT_ANY_ID)) {
            base_node = (esp_event_base_node_t*) calloc(1, sizeof(*base_node));

            if (!base_node) {
                ESP_LOGE(TAG, "alloc mem for new base node failed");
                return ESP_ERR_NO_MEM;
            }

            base_node->base = base;

            SLIST_INIT(&(base_node->handlers));
            SLIST_INIT(&(base_node->id_nodes));

            err = base_node_add_handler(base_node, id, handler, handler_arg);

            if (err == ESP_OK) {
                if (!last_base_node) {
                    SLIST_INSERT_HEAD(&(loop_node->base_nodes), base_node, next);
                } else {
                    SLIST_INSERT_AFTER(last_base_node, base_node, next);
                }
            } else {
                free(base_node);
            }

            return err;
        } else {
            return base_node_add_handler(base_node, id, handler, handler_arg);
        }
    }
}
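
/*
 * Shape of the registration data maintained by the add/remove helpers above
 * (a descriptive sketch only, no additional code):
 *
 *   loop->loop_nodes -> loop_node
 *                         +-- handlers                 (loop level: any base, any id)
 *                         +-- base_nodes -> base_node
 *                                             +-- handlers       (base level: any id)
 *                                             +-- id_nodes -> id_node
 *                                                               +-- handlers (base + id)
 */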

static esp_err_t handler_instances_remove(esp_event_handler_instances_t* handlers, esp_event_handler_t handler)
{
    esp_event_handler_instance_t *it, *temp;

    SLIST_FOREACH_SAFE(it, handlers, next, temp) {
        if (it->handler == handler) {
            SLIST_REMOVE(handlers, it, esp_event_handler_instance, next);
            free(it);
            return ESP_OK;
        }
    }

    return ESP_ERR_NOT_FOUND;
}

static esp_err_t base_node_remove_handler(esp_event_base_node_t* base_node, int32_t id, esp_event_handler_t handler)
{
    if (id == ESP_EVENT_ANY_ID) {
        return handler_instances_remove(&(base_node->handlers), handler);
    } else {
        esp_event_id_node_t *it, *temp;

        SLIST_FOREACH_SAFE(it, &(base_node->id_nodes), next, temp) {
            if (it->id == id) {
                esp_err_t res = handler_instances_remove(&(it->handlers), handler);

                if (res == ESP_OK) {
                    if (SLIST_EMPTY(&(it->handlers))) {
                        SLIST_REMOVE(&(base_node->id_nodes), it, esp_event_id_node, next);
                        free(it);
                        return ESP_OK;
                    }
                }
            }
        }
    }

    return ESP_ERR_NOT_FOUND;
}

static esp_err_t loop_node_remove_handler(esp_event_loop_node_t* loop_node, esp_event_base_t base, int32_t id, esp_event_handler_t handler)
{
    if (base == esp_event_any_base && id == ESP_EVENT_ANY_ID) {
        return handler_instances_remove(&(loop_node->handlers), handler);
    } else {
        esp_event_base_node_t *it, *temp;

        SLIST_FOREACH_SAFE(it, &(loop_node->base_nodes), next, temp) {
            if (it->base == base) {
                esp_err_t res = base_node_remove_handler(it, id, handler);

                if (res == ESP_OK) {
                    if (SLIST_EMPTY(&(it->handlers)) && SLIST_EMPTY(&(it->id_nodes))) {
                        SLIST_REMOVE(&(loop_node->base_nodes), it, esp_event_base_node, next);
                        free(it);
                        return ESP_OK;
                    }
                }
            }
        }
    }

    return ESP_ERR_NOT_FOUND;
}

static void handler_instances_remove_all(esp_event_handler_instances_t* handlers)
{
    esp_event_handler_instance_t *it, *temp;

    SLIST_FOREACH_SAFE(it, handlers, next, temp) {
        SLIST_REMOVE(handlers, it, esp_event_handler_instance, next);
        free(it);
    }
}

static void base_node_remove_all_handler(esp_event_base_node_t* base_node)
{
    handler_instances_remove_all(&(base_node->handlers));

    esp_event_id_node_t *it, *temp;

    SLIST_FOREACH_SAFE(it, &(base_node->id_nodes), next, temp) {
        handler_instances_remove_all(&(it->handlers));
        SLIST_REMOVE(&(base_node->id_nodes), it, esp_event_id_node, next);
        free(it);
    }
}

static void loop_node_remove_all_handler(esp_event_loop_node_t* loop_node)
{
    handler_instances_remove_all(&(loop_node->handlers));

    esp_event_base_node_t *it, *temp;

    SLIST_FOREACH_SAFE(it, &(loop_node->base_nodes), next, temp) {
        base_node_remove_all_handler(it);
        SLIST_REMOVE(&(loop_node->base_nodes), it, esp_event_base_node, next);
        free(it);
    }
}

static void inline __attribute__((always_inline)) post_instance_delete(esp_event_post_instance_t* post)
{
#if CONFIG_ESP_EVENT_POST_FROM_ISR
    if (post->data_allocated && post->data.ptr) {
        free(post->data.ptr);
    }
#else
    if (post->data) {
        free(post->data);
    }
#endif
    memset(post, 0, sizeof(*post));
}

/* ---------------------------- Public API --------------------------------- */

esp_err_t esp_event_loop_create(const esp_event_loop_args_t* event_loop_args, esp_event_loop_handle_t* event_loop)
{
    if (event_loop_args == NULL) {
        ESP_LOGE(TAG, "event_loop_args was NULL");
        return ESP_ERR_INVALID_ARG;
    }

    if (event_loop == NULL) {
        ESP_LOGE(TAG, "event_loop was NULL");
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop;
    esp_err_t err = ESP_ERR_NO_MEM; // most likely error

    loop = calloc(1, sizeof(*loop));
    if (loop == NULL) {
        ESP_LOGE(TAG, "alloc for event loop failed");
        return err;
    }

    loop->queue = xQueueCreate(event_loop_args->queue_size, sizeof(esp_event_post_instance_t));
    if (loop->queue == NULL) {
        ESP_LOGE(TAG, "create event loop queue failed");
        goto on_err;
    }

    loop->mutex = xSemaphoreCreateRecursiveMutex();
    if (loop->mutex == NULL) {
        ESP_LOGE(TAG, "create event loop mutex failed");
        goto on_err;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    loop->profiling_mutex = xSemaphoreCreateMutex();
    if (loop->profiling_mutex == NULL) {
        ESP_LOGE(TAG, "create event loop profiling mutex failed");
        goto on_err;
    }
#endif

    SLIST_INIT(&(loop->loop_nodes));

    // Create the loop task if requested
    if (event_loop_args->task_name != NULL) {
        BaseType_t task_created = xTaskCreatePinnedToCore(esp_event_loop_run_task, event_loop_args->task_name,
                                                          event_loop_args->task_stack_size, (void*) loop,
                                                          event_loop_args->task_priority, &(loop->task), event_loop_args->task_core_id);

        if (task_created != pdPASS) {
            ESP_LOGE(TAG, "create task for loop failed");
            err = ESP_FAIL;
            goto on_err;
        }

        loop->name = event_loop_args->task_name;

        ESP_LOGD(TAG, "created task for loop %p", loop);
    } else {
        loop->name = "";
        loop->task = NULL;
    }

    loop->running_task = NULL;

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    portENTER_CRITICAL(&s_event_loops_spinlock);
    SLIST_INSERT_HEAD(&s_event_loops, loop, next);
    portEXIT_CRITICAL(&s_event_loops_spinlock);
#endif

    *event_loop = (esp_event_loop_handle_t) loop;

    ESP_LOGD(TAG, "created event loop %p", loop);

    return ESP_OK;

on_err:
    if (loop->queue != NULL) {
        vQueueDelete(loop->queue);
    }

    if (loop->mutex != NULL) {
        vSemaphoreDelete(loop->mutex);
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    if (loop->profiling_mutex != NULL) {
        vSemaphoreDelete(loop->profiling_mutex);
    }
#endif

    free(loop);

    return err;
}
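
// Illustrative usage sketch for esp_event_loop_create() (not part of this
// file; the field values and the "my_loop" names are placeholders):
//
//     esp_event_loop_args_t loop_args = {
//         .queue_size = 16,
//         .task_name = "my_loop_task",   // set to NULL to dispatch manually
//         .task_priority = 5,
//         .task_stack_size = 3072,
//         .task_core_id = tskNO_AFFINITY,
//     };
//     esp_event_loop_handle_t my_loop;
//     ESP_ERROR_CHECK(esp_event_loop_create(&loop_args, &my_loop));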

// On event lookup performance: The library implements the event list as a linked list, which results in O(n)
// lookup time. Tests comparing this implementation to the O(lg n) lookup of rbtrees
// (https://github.com/freebsd/freebsd/blob/master/sys/sys/tree.h)
// indicate that the difference is not substantial, especially considering the additional
// pointers per node that rbtrees require. Code for the rbtree implementation of the event loop library is archived
// in feature/esp_event_loop_library_rbtrees if needed.
esp_err_t esp_event_loop_run(esp_event_loop_handle_t event_loop, TickType_t ticks_to_run)
{
    assert(event_loop);

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
    esp_event_post_instance_t post;
    TickType_t marker = xTaskGetTickCount();
    TickType_t end = 0;

#if (configUSE_16_BIT_TICKS == 1)
    int32_t remaining_ticks = ticks_to_run;
#else
    int64_t remaining_ticks = ticks_to_run;
#endif

    while (xQueueReceive(loop->queue, &post, ticks_to_run) == pdTRUE) {
        // The event has already been unqueued, so ensure it gets executed.
        xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

        loop->running_task = xTaskGetCurrentTaskHandle();

        bool exec = false;

        esp_event_handler_instance_t *handler, *temp_handler;
        esp_event_loop_node_t *loop_node, *temp_node;
        esp_event_base_node_t *base_node, *temp_base;
        esp_event_id_node_t *id_node, *temp_id_node;

        SLIST_FOREACH_SAFE(loop_node, &(loop->loop_nodes), next, temp_node) {
            // Execute loop level handlers
            SLIST_FOREACH_SAFE(handler, &(loop_node->handlers), next, temp_handler) {
                handler_execute(loop, handler, post);
                exec |= true;
            }

            SLIST_FOREACH_SAFE(base_node, &(loop_node->base_nodes), next, temp_base) {
                if (base_node->base == post.base) {
                    // Execute base level handlers
                    SLIST_FOREACH_SAFE(handler, &(base_node->handlers), next, temp_handler) {
                        handler_execute(loop, handler, post);
                        exec |= true;
                    }

                    SLIST_FOREACH_SAFE(id_node, &(base_node->id_nodes), next, temp_id_node) {
                        if (id_node->id == post.id) {
                            // Execute id level handlers
                            SLIST_FOREACH_SAFE(handler, &(id_node->handlers), next, temp_handler) {
                                handler_execute(loop, handler, post);
                                exec |= true;
                            }
                            // Skip to next base node
                            break;
                        }
                    }
                }
            }
        }

        esp_event_base_t base = post.base;
        int32_t id = post.id;

        post_instance_delete(&post);

        if (ticks_to_run != portMAX_DELAY) {
            end = xTaskGetTickCount();
            remaining_ticks -= end - marker;
            // If the ticks to run expired, return to the caller
            if (remaining_ticks <= 0) {
                xSemaphoreGiveRecursive(loop->mutex);
                break;
            } else {
                marker = end;
            }
        }

        loop->running_task = NULL;

        xSemaphoreGiveRecursive(loop->mutex);

        if (!exec) {
            // No handlers were registered, not even loop/base level handlers
            ESP_LOGD(TAG, "no handlers have been registered for event %s:%d posted to loop %p", base, id, event_loop);
        }
    }

    return ESP_OK;
}
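
// Illustrative sketch (placeholders, not part of this file): for a loop
// created with .task_name == NULL, the application drives dispatching itself
// by calling esp_event_loop_run() from one of its own tasks:
//
//     // Dispatch queued events for at most 100 ticks, then return.
//     esp_event_loop_run(my_loop, 100);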

esp_err_t esp_event_loop_delete(esp_event_loop_handle_t event_loop)
{
    assert(event_loop);

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
    SemaphoreHandle_t loop_mutex = loop->mutex;
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    SemaphoreHandle_t loop_profiling_mutex = loop->profiling_mutex;
#endif

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    xSemaphoreTakeRecursive(loop->profiling_mutex, portMAX_DELAY);
    portENTER_CRITICAL(&s_event_loops_spinlock);
    SLIST_REMOVE(&s_event_loops, loop, esp_event_loop_instance, next);
    portEXIT_CRITICAL(&s_event_loops_spinlock);
#endif

    // Delete the task if it was created
    if (loop->task != NULL) {
        vTaskDelete(loop->task);
    }

    // Remove all registered events and handlers in the loop
    esp_event_loop_node_t *it, *temp;
    SLIST_FOREACH_SAFE(it, &(loop->loop_nodes), next, temp) {
        loop_node_remove_all_handler(it);
        SLIST_REMOVE(&(loop->loop_nodes), it, esp_event_loop_node, next);
        free(it);
    }

    // Drop existing posts on the queue
    esp_event_post_instance_t post;
    while (xQueueReceive(loop->queue, &post, 0) == pdTRUE) {
        post_instance_delete(&post);
    }

    // Cleanup loop
    vQueueDelete(loop->queue);
    free(loop);

    // Free loop mutex before deleting
    xSemaphoreGiveRecursive(loop_mutex);
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    xSemaphoreGiveRecursive(loop_profiling_mutex);
    vSemaphoreDelete(loop_profiling_mutex);
#endif
    vSemaphoreDelete(loop_mutex);

    ESP_LOGD(TAG, "deleted loop %p", (void*) event_loop);

    return ESP_OK;
}
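
// Illustrative teardown sketch ("my_loop" is a placeholder): deleting a loop
// drains its queue and removes any handlers still registered with it, as
// implemented above.
//
//     ESP_ERROR_CHECK(esp_event_loop_delete(my_loop));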

esp_err_t esp_event_handler_register_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                          int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg)
{
    assert(event_loop);
    assert(event_handler);

    if (event_base == ESP_EVENT_ANY_BASE && event_id != ESP_EVENT_ANY_ID) {
        ESP_LOGE(TAG, "registering to any event base with specific id unsupported");
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    if (event_base == ESP_EVENT_ANY_BASE) {
        event_base = esp_event_any_base;
    }

    esp_err_t err = ESP_OK;

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

    esp_event_loop_node_t *loop_node = NULL, *last_loop_node = NULL;

    SLIST_FOREACH(loop_node, &(loop->loop_nodes), next) {
        last_loop_node = loop_node;
    }

    bool is_loop_level_handler = (event_base == esp_event_any_base) && (event_id == ESP_EVENT_ANY_ID);

    if (!last_loop_node ||
        (last_loop_node && !SLIST_EMPTY(&(last_loop_node->base_nodes)) && is_loop_level_handler)) {
        loop_node = (esp_event_loop_node_t*) calloc(1, sizeof(*loop_node));

        if (!loop_node) {
            ESP_LOGE(TAG, "alloc for new loop node failed");
            err = ESP_ERR_NO_MEM;
            goto on_err;
        }

        // Initialize the new node only after the allocation has been checked
        SLIST_INIT(&(loop_node->handlers));
        SLIST_INIT(&(loop_node->base_nodes));

        err = loop_node_add_handler(loop_node, event_base, event_id, event_handler, event_handler_arg);

        if (err == ESP_OK) {
            if (!last_loop_node) {
                SLIST_INSERT_HEAD(&(loop->loop_nodes), loop_node, next);
            } else {
                SLIST_INSERT_AFTER(last_loop_node, loop_node, next);
            }
        } else {
            free(loop_node);
        }
    } else {
        err = loop_node_add_handler(last_loop_node, event_base, event_id, event_handler, event_handler_arg);
    }

on_err:
    xSemaphoreGiveRecursive(loop->mutex);

    return err;
}
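
// Illustrative registration sketch (not part of this file): the event base,
// id and handler below are placeholders following the esp_event_handler_t
// signature.
//
//     ESP_EVENT_DEFINE_BASE(MY_EVENT_BASE);
//     enum { MY_EVENT_STARTED };
//
//     static void my_handler(void* arg, esp_event_base_t base, int32_t id, void* data)
//     {
//         // react to MY_EVENT_BASE:MY_EVENT_STARTED here
//     }
//
//     ESP_ERROR_CHECK(esp_event_handler_register_with(my_loop, MY_EVENT_BASE,
//                                                     MY_EVENT_STARTED, my_handler, NULL));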

esp_err_t esp_event_handler_unregister_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                            int32_t event_id, esp_event_handler_t event_handler)
{
    assert(event_loop);
    assert(event_handler);

    if (event_base == ESP_EVENT_ANY_BASE && event_id != ESP_EVENT_ANY_ID) {
        ESP_LOGE(TAG, "unregistering to any event base with specific id unsupported");
        return ESP_FAIL;
    }

    if (event_base == ESP_EVENT_ANY_BASE) {
        event_base = esp_event_any_base;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

    esp_event_loop_node_t *it, *temp;

    SLIST_FOREACH_SAFE(it, &(loop->loop_nodes), next, temp) {
        esp_err_t res = loop_node_remove_handler(it, event_base, event_id, event_handler);

        if (res == ESP_OK && SLIST_EMPTY(&(it->base_nodes)) && SLIST_EMPTY(&(it->handlers))) {
            SLIST_REMOVE(&(loop->loop_nodes), it, esp_event_loop_node, next);
            free(it);
            break;
        }
    }

    xSemaphoreGiveRecursive(loop->mutex);

    return ESP_OK;
}
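
// Illustrative sketch: the same base/id/handler triple used at registration is
// passed again to remove the handler (names are placeholders from the example
// above):
//
//     esp_event_handler_unregister_with(my_loop, MY_EVENT_BASE, MY_EVENT_STARTED, my_handler);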

esp_err_t esp_event_post_to(esp_event_loop_handle_t event_loop, esp_event_base_t event_base, int32_t event_id,
                            void* event_data, size_t event_data_size, TickType_t ticks_to_wait)
{
    assert(event_loop);

    if (event_base == ESP_EVENT_ANY_BASE || event_id == ESP_EVENT_ANY_ID) {
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    esp_event_post_instance_t post;
    memset((void*)(&post), 0, sizeof(post));

    if (event_data != NULL && event_data_size != 0) {
        // Make persistent copy of event data on heap.
        void* event_data_copy = calloc(1, event_data_size);

        if (event_data_copy == NULL) {
            return ESP_ERR_NO_MEM;
        }

        memcpy(event_data_copy, event_data, event_data_size);
#if CONFIG_ESP_EVENT_POST_FROM_ISR
        post.data.ptr = event_data_copy;
        post.data_allocated = true;
        post.data_set = true;
#else
        post.data = event_data_copy;
#endif
    }
    post.base = event_base;
    post.id = event_id;

    BaseType_t result = pdFALSE;

    // Find the task that currently executes the loop. It is safe to query loop->task since it is
    // not mutated since loop creation. ENSURE THIS REMAINS TRUE.
    if (loop->task == NULL) {
        // The loop has no dedicated task. Find out what task is currently running it.
        result = xSemaphoreTakeRecursive(loop->mutex, ticks_to_wait);

        if (result == pdTRUE) {
            if (loop->running_task != xTaskGetCurrentTaskHandle()) {
                xSemaphoreGiveRecursive(loop->mutex);
                result = xQueueSendToBack(loop->queue, &post, ticks_to_wait);
            } else {
                xSemaphoreGiveRecursive(loop->mutex);
                result = xQueueSendToBack(loop->queue, &post, 0);
            }
        }
    } else {
        // The loop has a dedicated task.
        if (loop->task != xTaskGetCurrentTaskHandle()) {
            result = xQueueSendToBack(loop->queue, &post, ticks_to_wait);
        } else {
            result = xQueueSendToBack(loop->queue, &post, 0);
        }
    }

    if (result != pdTRUE) {
        post_instance_delete(&post);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
        atomic_fetch_add(&loop->events_dropped, 1);
#endif
        return ESP_ERR_TIMEOUT;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    atomic_fetch_add(&loop->events_recieved, 1);
#endif

    return ESP_OK;
}
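
// Illustrative posting sketch (placeholders, not part of this file): the event
// data is copied onto the heap by esp_event_post_to(), so a stack variable is
// safe to pass.
//
//     int started_ok = 1;
//     esp_err_t err = esp_event_post_to(my_loop, MY_EVENT_BASE, MY_EVENT_STARTED,
//                                       &started_ok, sizeof(started_ok), pdMS_TO_TICKS(10));
//     if (err == ESP_ERR_TIMEOUT) {
//         // the queue stayed full for the whole wait; the post was dropped
//     }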

#if CONFIG_ESP_EVENT_POST_FROM_ISR
esp_err_t esp_event_isr_post_to(esp_event_loop_handle_t event_loop, esp_event_base_t event_base, int32_t event_id,
                                void* event_data, size_t event_data_size, BaseType_t* task_unblocked)
{
    assert(event_loop);

    if (event_base == ESP_EVENT_ANY_BASE || event_id == ESP_EVENT_ANY_ID) {
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    esp_event_post_instance_t post;
    memset((void*)(&post), 0, sizeof(post));

    if (event_data_size > sizeof(post.data.val)) {
        return ESP_ERR_INVALID_ARG;
    }

    if (event_data != NULL && event_data_size != 0) {
        memcpy((void*)(&(post.data.val)), event_data, event_data_size);
        post.data_allocated = false;
        post.data_set = true;
    }
    post.base = event_base;
    post.id = event_id;

    BaseType_t result = pdFALSE;

    // Post the event from an ISR.
    result = xQueueSendToBackFromISR(loop->queue, &post, task_unblocked);

    if (result != pdTRUE) {
        post_instance_delete(&post);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
        atomic_fetch_add(&loop->events_dropped, 1);
#endif
        return ESP_FAIL;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    atomic_fetch_add(&loop->events_recieved, 1);
#endif

    return ESP_OK;
}
#endif
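
// Illustrative ISR-context sketch (placeholders): data posted from an ISR must
// fit in post.data.val, and the caller is told whether a higher priority task
// was unblocked so it can yield before returning from the interrupt.
//
//     BaseType_t task_unblocked = pdFALSE;
//     uint32_t gpio_num = 4;
//     esp_event_isr_post_to(my_loop, MY_EVENT_BASE, MY_EVENT_STARTED,
//                           &gpio_num, sizeof(gpio_num), &task_unblocked);
//     if (task_unblocked == pdTRUE) {
//         portYIELD_FROM_ISR();
//     }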

esp_err_t esp_event_dump(FILE* file)
{
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    assert(file);

    esp_event_loop_instance_t* loop_it;
    esp_event_loop_node_t* loop_node_it;
    esp_event_base_node_t* base_node_it;
    esp_event_id_node_t* id_node_it;
    esp_event_handler_instance_t* handler_it;

    // Allocate memory for printing
    int sz = esp_event_dump_prepare();
    char* buf = calloc(sz, sizeof(char));
    char* dst = buf;

    char id_str_buf[20];

    // Print info to buffer
    portENTER_CRITICAL(&s_event_loops_spinlock);

    SLIST_FOREACH(loop_it, &s_event_loops, next) {
        uint32_t events_recieved, events_dropped;

        events_recieved = atomic_load(&loop_it->events_recieved);
        events_dropped = atomic_load(&loop_it->events_dropped);

        PRINT_DUMP_INFO(dst, sz, LOOP_DUMP_FORMAT, loop_it, loop_it->task != NULL ? loop_it->name : "none",
                        events_recieved, events_dropped);

        int sz_bak = sz;

        SLIST_FOREACH(loop_node_it, &(loop_it->loop_nodes), next) {
            SLIST_FOREACH(handler_it, &(loop_node_it->handlers), next) {
                PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler, "ESP_EVENT_ANY_BASE",
                                "ESP_EVENT_ANY_ID", handler_it->invoked, handler_it->time);
            }

            SLIST_FOREACH(base_node_it, &(loop_node_it->base_nodes), next) {
                SLIST_FOREACH(handler_it, &(base_node_it->handlers), next) {
                    PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler, base_node_it->base,
                                    "ESP_EVENT_ANY_ID", handler_it->invoked, handler_it->time);
                }

                SLIST_FOREACH(id_node_it, &(base_node_it->id_nodes), next) {
                    SLIST_FOREACH(handler_it, &(id_node_it->handlers), next) {
                        memset(id_str_buf, 0, sizeof(id_str_buf));
                        snprintf(id_str_buf, sizeof(id_str_buf), "%d", id_node_it->id);

                        PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler, base_node_it->base,
                                        id_str_buf, handler_it->invoked, handler_it->time);
                    }
                }
            }
        }

        // No handlers registered for this loop
        if (sz == sz_bak) {
            PRINT_DUMP_INFO(dst, sz, " NO HANDLERS REGISTERED\n");
        }
    }

    portEXIT_CRITICAL(&s_event_loops_spinlock);

    // Print the contents of the buffer to the file
    fprintf(file, "%s", buf);
    // Free the allocated buffer
    free(buf);
#endif
    return ESP_OK;
}
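
// Illustrative sketch: with CONFIG_ESP_EVENT_LOOP_PROFILING enabled, the
// per-loop statistics collected above can be written to any stdio stream:
//
//     ESP_ERROR_CHECK(esp_event_dump(stdout));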