// esp_event.c — ESP-IDF event loop library implementation
  1. // Copyright 2018 Espressif Systems (Shanghai) PTE LTD
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. // http://www.apache.org/licenses/LICENSE-2.0
  7. //
  8. // Unless required by applicable law or agreed to in writing, software
  9. // distributed under the License is distributed on an "AS IS" BASIS,
  10. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11. // See the License for the specific language governing permissions and
  12. // limitations under the License.
  13. #include <stdlib.h>
  14. #include <string.h>
  15. #include <stdio.h>
  16. #include <stdbool.h>
  17. #include "esp_log.h"
  18. #include "esp_event.h"
  19. #include "esp_event_internal.h"
  20. #include "esp_event_private.h"
  21. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  22. #include "esp_timer.h"
  23. #endif
  24. /* ---------------------------- Definitions --------------------------------- */
  25. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  26. // LOOP @<address, name> rx:<recieved events no.> dr:<dropped events no.>
  27. #define LOOP_DUMP_FORMAT "LOOP @%p,%s rx:%u dr:%u\n"
  28. // handler @<address> ev:<base, id> inv:<times invoked> time:<runtime>
  29. #define HANDLER_DUMP_FORMAT " HANDLER @%p ev:%s,%s inv:%u time:%lld us\n"
  30. #define PRINT_DUMP_INFO(dst, sz, ...) do { \
  31. int cb = snprintf(dst, sz, __VA_ARGS__); \
  32. dst += cb; \
  33. sz -= cb; \
  34. } while(0);
  35. #endif
  36. /* ------------------------- Static Variables ------------------------------- */
  37. static const char* TAG = "event";
  38. static const char* esp_event_any_base = "any";
  39. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  40. static SLIST_HEAD(esp_event_loop_instance_list_t, esp_event_loop_instance) s_event_loops =
  41. SLIST_HEAD_INITIALIZER(s_event_loops);
  42. static portMUX_TYPE s_event_loops_spinlock = portMUX_INITIALIZER_UNLOCKED;
  43. #endif
  44. /* ------------------------- Static Functions ------------------------------- */
  45. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  46. static int esp_event_dump_prepare(void)
  47. {
  48. esp_event_loop_instance_t* loop_it;
  49. esp_event_loop_node_t *loop_node_it;
  50. esp_event_base_node_t* base_node_it;
  51. esp_event_id_node_t* id_node_it;
  52. esp_event_handler_node_t* handler_it;
  53. // Count the number of items to be printed. This is needed to compute how much memory to reserve.
  54. int loops = 0, handlers = 0;
  55. portENTER_CRITICAL(&s_event_loops_spinlock);
  56. SLIST_FOREACH(loop_it, &s_event_loops, next) {
  57. SLIST_FOREACH(loop_node_it, &(loop_it->loop_nodes), next) {
  58. SLIST_FOREACH(handler_it, &(loop_node_it->handlers), next) {
  59. handlers++;
  60. }
  61. SLIST_FOREACH(base_node_it, &(loop_node_it->base_nodes), next) {
  62. SLIST_FOREACH(handler_it, &(base_node_it->handlers), next) {
  63. handlers++;
  64. }
  65. SLIST_FOREACH(id_node_it, &(base_node_it->id_nodes), next) {
  66. SLIST_FOREACH(handler_it, &(id_node_it->handlers), next) {
  67. handlers++;
  68. }
  69. }
  70. }
  71. }
  72. loops++;
  73. }
  74. portEXIT_CRITICAL(&s_event_loops_spinlock);
  75. // Reserve slightly more memory than computed
  76. int allowance = 3;
  77. int size = (((loops + allowance) * (sizeof(LOOP_DUMP_FORMAT) + 10 + 20 + 2 * 11)) +
  78. ((handlers + allowance) * (sizeof(HANDLER_DUMP_FORMAT) + 10 + 2 * 20 + 11 + 20)));
  79. return size;
  80. }
  81. #endif
  82. static void esp_event_loop_run_task(void* args)
  83. {
  84. esp_err_t err;
  85. esp_event_loop_handle_t event_loop = (esp_event_loop_handle_t) args;
  86. ESP_LOGD(TAG, "running task for loop %p", event_loop);
  87. while(1) {
  88. err = esp_event_loop_run(event_loop, portMAX_DELAY);
  89. if (err != ESP_OK) {
  90. break;
  91. }
  92. }
  93. ESP_LOGE(TAG, "suspended task for loop %p", event_loop);
  94. vTaskSuspend(NULL);
  95. }
// Invoke a single registered handler for the given post, optionally
// accounting invocation count and accumulated runtime when profiling is
// enabled. `post` is taken by value; the instance's data storage is still
// owned by the caller (esp_event_loop_run), which frees it afterwards.
static void handler_execute(esp_event_loop_instance_t* loop, esp_event_handler_node_t *handler, esp_event_post_instance_t post)
{
    ESP_LOGD(TAG, "running post %s:%d with handler %p and context %p on loop %p", post.base, post.id, handler->handler_ctx->handler, &handler->handler_ctx, loop);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    // Timestamp taken immediately around the handler call so only the
    // handler's own runtime is attributed to it.
    int64_t start, diff;
    start = esp_timer_get_time();
#endif
    // Execute the handler
#if CONFIG_ESP_EVENT_POST_FROM_ISR
    // With ISR posting enabled, data is a union: either a heap pointer
    // (data_allocated) or a small value stored inline (data.val). Resolve
    // which representation was used before handing it to the handler.
    void* data_ptr = NULL;

    if (post.data_set) {
        if (post.data_allocated) {
            data_ptr = post.data.ptr;
        } else {
            data_ptr = &post.data.val;
        }
    }

    (*(handler->handler_ctx->handler))(handler->handler_ctx->arg, post.base, post.id, data_ptr);
#else
    (*(handler->handler_ctx->handler))(handler->handler_ctx->arg, post.base, post.id, post.data);
#endif

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    diff = esp_timer_get_time() - start;

    // Profiling counters are shared with esp_event_dump; protect them.
    xSemaphoreTake(loop->profiling_mutex, portMAX_DELAY);
    handler->invoked++;
    handler->time += diff;
    xSemaphoreGive(loop->profiling_mutex);
#endif
}
  125. static esp_err_t handler_instances_add(esp_event_handler_nodes_t* handlers, esp_event_handler_t event_handler, void* event_handler_arg, esp_event_handler_instance_context_t **handler_ctx, bool legacy)
  126. {
  127. esp_event_handler_node_t *handler_instance = calloc(1, sizeof(*handler_instance));
  128. if (!handler_instance) return ESP_ERR_NO_MEM;
  129. esp_event_handler_instance_context_t *context = calloc(1, sizeof(*context));
  130. if (!context) {
  131. free(handler_instance);
  132. return ESP_ERR_NO_MEM;
  133. }
  134. context->handler = event_handler;
  135. context->arg = event_handler_arg;
  136. handler_instance->handler_ctx = context;
  137. if (SLIST_EMPTY(handlers)) {
  138. SLIST_INSERT_HEAD(handlers, handler_instance, next);
  139. }
  140. else {
  141. esp_event_handler_node_t *it = NULL, *last = NULL;
  142. SLIST_FOREACH(it, handlers, next) {
  143. if (legacy) {
  144. if(event_handler == it->handler_ctx->handler) {
  145. it->handler_ctx->arg = event_handler_arg;
  146. ESP_LOGW(TAG, "handler already registered, overwriting");
  147. free(handler_instance);
  148. free(context);
  149. return ESP_OK;
  150. }
  151. }
  152. last = it;
  153. }
  154. SLIST_INSERT_AFTER(last, handler_instance, next);
  155. }
  156. // If the caller didn't provide the handler instance context, don't set it.
  157. // It will be removed once the event loop is deleted.
  158. if (handler_ctx) {
  159. *handler_ctx = context;
  160. }
  161. return ESP_OK;
  162. }
  163. static esp_err_t base_node_add_handler(esp_event_base_node_t* base_node,
  164. int32_t id,
  165. esp_event_handler_t event_handler,
  166. void *event_handler_arg,
  167. esp_event_handler_instance_context_t **handler_ctx,
  168. bool legacy)
  169. {
  170. if (id == ESP_EVENT_ANY_ID) {
  171. return handler_instances_add(&(base_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
  172. }
  173. else {
  174. esp_err_t err = ESP_OK;
  175. esp_event_id_node_t *it = NULL, *id_node = NULL, *last_id_node = NULL;
  176. SLIST_FOREACH(it, &(base_node->id_nodes), next) {
  177. if (it->id == id) {
  178. id_node = it;
  179. }
  180. last_id_node = it;
  181. }
  182. if (!last_id_node || !id_node) {
  183. id_node = (esp_event_id_node_t*) calloc(1, sizeof(*id_node));
  184. if (!id_node) {
  185. ESP_LOGE(TAG, "alloc for new id node failed");
  186. return ESP_ERR_NO_MEM;
  187. }
  188. id_node->id = id;
  189. SLIST_INIT(&(id_node->handlers));
  190. err = handler_instances_add(&(id_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
  191. if (err == ESP_OK) {
  192. if (!last_id_node) {
  193. SLIST_INSERT_HEAD(&(base_node->id_nodes), id_node, next);
  194. }
  195. else {
  196. SLIST_INSERT_AFTER(last_id_node, id_node, next);
  197. }
  198. } else {
  199. free(id_node);
  200. }
  201. return err;
  202. }
  203. else {
  204. return handler_instances_add(&(id_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
  205. }
  206. }
  207. }
// Register a handler beneath a loop node. Loop-level handlers (any base,
// any id) are stored directly on the loop node; otherwise the handler is
// delegated to a base node, which is created when needed.
static esp_err_t loop_node_add_handler(esp_event_loop_node_t* loop_node,
                                       esp_event_base_t base,
                                       int32_t id,
                                       esp_event_handler_t event_handler,
                                       void *event_handler_arg,
                                       esp_event_handler_instance_context_t **handler_ctx,
                                       bool legacy)
{
    if (base == esp_event_any_base && id == ESP_EVENT_ANY_ID) {
        return handler_instances_add(&(loop_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
    }
    else {
        esp_err_t err = ESP_OK;

        // Find an existing node for this base (base_node) and the list tail
        // (last_base_node) in one pass.
        esp_event_base_node_t *it = NULL, *base_node = NULL, *last_base_node = NULL;

        SLIST_FOREACH(it, &(loop_node->base_nodes), next) {
            if (it->base == base) {
                base_node = it;
            }
            last_base_node = it;
        }

        // A fresh base node is created when: the list is empty, no node for
        // this base exists, or an ANY_ID handler would otherwise land on a
        // node that already has id-specific nodes (the dispatch order in
        // esp_event_loop_run depends on keeping such handlers on their own
        // node). NOTE(review): the last two clauses appear to encode that
        // ordering constraint — confirm against the dispatch logic before
        // simplifying.
        if (!last_base_node ||
            !base_node ||
            (base_node && !SLIST_EMPTY(&(base_node->id_nodes)) && id == ESP_EVENT_ANY_ID) ||
            (last_base_node && last_base_node->base != base && !SLIST_EMPTY(&(last_base_node->id_nodes)) && id == ESP_EVENT_ANY_ID)) {
            base_node = (esp_event_base_node_t*) calloc(1, sizeof(*base_node));

            if (!base_node) {
                ESP_LOGE(TAG, "alloc mem for new base node failed");
                return ESP_ERR_NO_MEM;
            }

            base_node->base = base;

            SLIST_INIT(&(base_node->handlers));
            SLIST_INIT(&(base_node->id_nodes));

            err = base_node_add_handler(base_node, id, event_handler, event_handler_arg, handler_ctx, legacy);

            if (err == ESP_OK) {
                // Preserve registration order: append at the tail, or at the
                // head when the list was empty.
                if (!last_base_node) {
                    SLIST_INSERT_HEAD(&(loop_node->base_nodes), base_node, next);
                }
                else {
                    SLIST_INSERT_AFTER(last_base_node, base_node, next);
                }
            } else {
                // Registration failed; discard the node we just created.
                free(base_node);
            }

            return err;
        } else {
            return base_node_add_handler(base_node, id, event_handler, event_handler_arg, handler_ctx, legacy);
        }
    }
}
  257. static esp_err_t handler_instances_remove(esp_event_handler_nodes_t* handlers, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
  258. {
  259. esp_event_handler_node_t *it, *temp;
  260. SLIST_FOREACH_SAFE(it, handlers, next, temp) {
  261. if (legacy) {
  262. if (it->handler_ctx->handler == handler_ctx->handler) {
  263. SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
  264. free(it->handler_ctx);
  265. free(it);
  266. return ESP_OK;
  267. }
  268. } else {
  269. if (it->handler_ctx == handler_ctx) {
  270. SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
  271. free(it->handler_ctx);
  272. free(it);
  273. return ESP_OK;
  274. }
  275. }
  276. }
  277. return ESP_ERR_NOT_FOUND;
  278. }
  279. static esp_err_t base_node_remove_handler(esp_event_base_node_t* base_node, int32_t id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
  280. {
  281. if (id == ESP_EVENT_ANY_ID) {
  282. return handler_instances_remove(&(base_node->handlers), handler_ctx, legacy);
  283. }
  284. else {
  285. esp_event_id_node_t *it, *temp;
  286. SLIST_FOREACH_SAFE(it, &(base_node->id_nodes), next, temp) {
  287. if (it->id == id) {
  288. esp_err_t res = handler_instances_remove(&(it->handlers), handler_ctx, legacy);
  289. if (res == ESP_OK) {
  290. if (SLIST_EMPTY(&(it->handlers))) {
  291. SLIST_REMOVE(&(base_node->id_nodes), it, esp_event_id_node, next);
  292. free(it);
  293. return ESP_OK;
  294. }
  295. }
  296. }
  297. }
  298. }
  299. return ESP_ERR_NOT_FOUND;
  300. }
  301. static esp_err_t loop_node_remove_handler(esp_event_loop_node_t* loop_node, esp_event_base_t base, int32_t id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
  302. {
  303. if (base == esp_event_any_base && id == ESP_EVENT_ANY_ID) {
  304. return handler_instances_remove(&(loop_node->handlers), handler_ctx, legacy);
  305. }
  306. else {
  307. esp_event_base_node_t *it, *temp;
  308. SLIST_FOREACH_SAFE(it, &(loop_node->base_nodes), next, temp) {
  309. if (it->base == base) {
  310. esp_err_t res = base_node_remove_handler(it, id, handler_ctx, legacy);
  311. if (res == ESP_OK) {
  312. if (SLIST_EMPTY(&(it->handlers)) && SLIST_EMPTY(&(it->id_nodes))) {
  313. SLIST_REMOVE(&(loop_node->base_nodes), it, esp_event_base_node, next);
  314. free(it);
  315. return ESP_OK;
  316. }
  317. }
  318. }
  319. }
  320. }
  321. return ESP_ERR_NOT_FOUND;
  322. }
  323. static void handler_instances_remove_all(esp_event_handler_nodes_t* handlers)
  324. {
  325. esp_event_handler_node_t *it, *temp;
  326. SLIST_FOREACH_SAFE(it, handlers, next, temp) {
  327. SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
  328. free(it->handler_ctx);
  329. free(it);
  330. }
  331. }
  332. static void base_node_remove_all_handler(esp_event_base_node_t* base_node)
  333. {
  334. handler_instances_remove_all(&(base_node->handlers));
  335. esp_event_id_node_t *it, *temp;
  336. SLIST_FOREACH_SAFE(it, &(base_node->id_nodes), next, temp) {
  337. handler_instances_remove_all(&(it->handlers));
  338. SLIST_REMOVE(&(base_node->id_nodes), it, esp_event_id_node, next);
  339. free(it);
  340. }
  341. }
  342. static void loop_node_remove_all_handler(esp_event_loop_node_t* loop_node)
  343. {
  344. handler_instances_remove_all(&(loop_node->handlers));
  345. esp_event_base_node_t *it, *temp;
  346. SLIST_FOREACH_SAFE(it, &(loop_node->base_nodes), next, temp) {
  347. base_node_remove_all_handler(it);
  348. SLIST_REMOVE(&(loop_node->base_nodes), it, esp_event_base_node, next);
  349. free(it);
  350. }
  351. }
  352. static void inline __attribute__((always_inline)) post_instance_delete(esp_event_post_instance_t* post)
  353. {
  354. #if CONFIG_ESP_EVENT_POST_FROM_ISR
  355. if (post->data_allocated && post->data.ptr) {
  356. free(post->data.ptr);
  357. }
  358. #else
  359. if (post->data) {
  360. free(post->data);
  361. }
  362. #endif
  363. memset(post, 0, sizeof(*post));
  364. }
  365. /* ---------------------------- Public API --------------------------------- */
// Create an event loop: allocates the loop instance, its post queue and
// mutex (plus a profiling mutex when enabled), and optionally spawns a
// dedicated dispatcher task when event_loop_args->task_name is non-NULL.
// On success *event_loop receives the new handle; on failure all partially
// created resources are released.
esp_err_t esp_event_loop_create(const esp_event_loop_args_t* event_loop_args, esp_event_loop_handle_t* event_loop)
{
    if (event_loop_args == NULL) {
        ESP_LOGE(TAG, "event_loop_args was NULL");
        return ESP_ERR_INVALID_ARG;
    }

    if (event_loop == NULL) {
        ESP_LOGE(TAG, "event_loop was NULL");
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop;
    esp_err_t err = ESP_ERR_NO_MEM; // most likely error

    loop = calloc(1, sizeof(*loop));
    if (loop == NULL) {
        ESP_LOGE(TAG, "alloc for event loop failed");
        return err;
    }

    // Queue of pending posts; each item is a full post instance by value.
    loop->queue = xQueueCreate(event_loop_args->queue_size , sizeof(esp_event_post_instance_t));
    if (loop->queue == NULL) {
        ESP_LOGE(TAG, "create event loop queue failed");
        goto on_err;
    }

    // Recursive so the running task can re-take it (e.g. when a handler
    // registers/unregisters from within a dispatch).
    loop->mutex = xSemaphoreCreateRecursiveMutex();
    if (loop->mutex == NULL) {
        ESP_LOGE(TAG, "create event loop mutex failed");
        goto on_err;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    loop->profiling_mutex = xSemaphoreCreateMutex();
    if (loop->profiling_mutex == NULL) {
        ESP_LOGE(TAG, "create event loop profiling mutex failed");
        goto on_err;
    }
#endif

    SLIST_INIT(&(loop->loop_nodes));

    // Create the loop task if requested
    if (event_loop_args->task_name != NULL) {
        BaseType_t task_created = xTaskCreatePinnedToCore(esp_event_loop_run_task, event_loop_args->task_name,
                    event_loop_args->task_stack_size, (void*) loop,
                    event_loop_args->task_priority, &(loop->task), event_loop_args->task_core_id);

        if (task_created != pdPASS) {
            ESP_LOGE(TAG, "create task for loop failed");
            err = ESP_FAIL;
            goto on_err;
        }

        // NOTE(review): stores the caller's task_name pointer directly —
        // assumes the string outlives the loop; confirm callers pass a
        // literal or otherwise persistent string.
        loop->name = event_loop_args->task_name;

        ESP_LOGD(TAG, "created task for loop %p", loop);
    } else {
        loop->name = "";
        loop->task = NULL;
    }

    loop->running_task = NULL;

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    portENTER_CRITICAL(&s_event_loops_spinlock);
    SLIST_INSERT_HEAD(&s_event_loops, loop, next);
    portEXIT_CRITICAL(&s_event_loops_spinlock);
#endif

    *event_loop = (esp_event_loop_handle_t) loop;

    ESP_LOGD(TAG, "created event loop %p", loop);

    return ESP_OK;

on_err:
    // Unified cleanup: release whichever resources were created before the
    // failure (calloc zeroed the struct, so untouched handles are NULL).
    if (loop->queue != NULL) {
        vQueueDelete(loop->queue);
    }

    if (loop->mutex != NULL) {
        vSemaphoreDelete(loop->mutex);
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    if (loop->profiling_mutex != NULL) {
        vSemaphoreDelete(loop->profiling_mutex);
    }
#endif

    free(loop);

    return err;
}
// On event lookup performance: The library implements the event list as a linked list, which results in O(n)
// lookup time. A test comparing this implementation to the O(lg n) performance of rbtrees
// (https://github.com/freebsd/freebsd/blob/master/sys/sys/tree.h)
// indicates that the difference is not that substantial, especially considering the additional
// pointers per node of rbtrees. Code for the rbtree implementation of the event loop library is archived
// in feature/esp_event_loop_library_rbtrees if needed.
// Dispatch events from the loop's queue for at most ticks_to_run ticks
// (portMAX_DELAY runs indefinitely). For each received post, handlers are
// executed in order: loop-level, then base-level, then id-level.
esp_err_t esp_event_loop_run(esp_event_loop_handle_t event_loop, TickType_t ticks_to_run)
{
    assert(event_loop);

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
    esp_event_post_instance_t post;
    TickType_t marker = xTaskGetTickCount();
    TickType_t end = 0;

#if (configUSE_16_BIT_TICKS == 1)
    int32_t remaining_ticks = ticks_to_run;
#else
    int64_t remaining_ticks = ticks_to_run;
#endif

    // NOTE(review): the full ticks_to_run is used as the receive timeout on
    // EVERY iteration while remaining_ticks is decremented separately below
    // — it looks like the loop can block well past the requested budget;
    // confirm whether the timeout should instead be the remaining budget.
    while(xQueueReceive(loop->queue, &post, ticks_to_run) == pdTRUE) {
        // The event has already been unqueued, so ensure it gets executed.
        xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

        // Recorded so esp_event_post_to can detect self-posting and avoid
        // blocking on its own queue.
        loop->running_task = xTaskGetCurrentTaskHandle();

        bool exec = false;

        esp_event_handler_node_t *handler, *temp_handler;
        esp_event_loop_node_t *loop_node, *temp_node;
        esp_event_base_node_t *base_node, *temp_base;
        esp_event_id_node_t *id_node, *temp_id_node;

        SLIST_FOREACH_SAFE(loop_node, &(loop->loop_nodes), next, temp_node) {
            // Execute loop level handlers
            SLIST_FOREACH_SAFE(handler, &(loop_node->handlers), next, temp_handler) {
                handler_execute(loop, handler, post);
                exec |= true;
            }

            SLIST_FOREACH_SAFE(base_node, &(loop_node->base_nodes), next, temp_base) {
                if (base_node->base == post.base) {
                    // Execute base level handlers
                    SLIST_FOREACH_SAFE(handler, &(base_node->handlers), next, temp_handler) {
                        handler_execute(loop, handler, post);
                        exec |= true;
                    }

                    SLIST_FOREACH_SAFE(id_node, &(base_node->id_nodes), next, temp_id_node) {
                        if (id_node->id == post.id) {
                            // Execute id level handlers
                            SLIST_FOREACH_SAFE(handler, &(id_node->handlers), next, temp_handler) {
                                handler_execute(loop, handler, post);
                                exec |= true;
                            }
                            // Skip to next base node
                            break;
                        }
                    }
                }
            }
        }

        // Copy out base/id for the diagnostic below before the post's data
        // storage is released.
        esp_event_base_t base = post.base;
        int32_t id = post.id;

        post_instance_delete(&post);

        if (ticks_to_run != portMAX_DELAY) {
            end = xTaskGetTickCount();
            remaining_ticks -= end - marker;

            // If the ticks to run expired, return to the caller
            // NOTE(review): this early-exit path gives the mutex but does
            // not clear loop->running_task — confirm whether that is
            // intentional.
            if (remaining_ticks <= 0) {
                xSemaphoreGiveRecursive(loop->mutex);
                break;
            } else {
                marker = end;
            }
        }

        loop->running_task = NULL;

        xSemaphoreGiveRecursive(loop->mutex);

        if (!exec) {
            // No handlers were registered, not even loop/base level handlers
            ESP_LOGD(TAG, "no handlers have been registered for event %s:%d posted to loop %p", base, id, event_loop);
        }
    }

    return ESP_OK;
}
// Delete an event loop: stops its dedicated task (if any), unregisters all
// handlers, drains and frees pending posts, then releases the queue, the
// loop instance, and finally the synchronization primitives.
esp_err_t esp_event_loop_delete(esp_event_loop_handle_t event_loop)
{
    assert(event_loop);

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    // The mutex handles are saved in locals because the loop struct itself
    // is freed below, before the mutexes are given/deleted.
    SemaphoreHandle_t loop_mutex = loop->mutex;
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    SemaphoreHandle_t loop_profiling_mutex = loop->profiling_mutex;
#endif

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    xSemaphoreTake(loop->profiling_mutex, portMAX_DELAY);
    // Remove the loop from the global profiling registry.
    portENTER_CRITICAL(&s_event_loops_spinlock);
    SLIST_REMOVE(&s_event_loops, loop, esp_event_loop_instance, next);
    portEXIT_CRITICAL(&s_event_loops_spinlock);
#endif

    // Delete the task if it was created
    if (loop->task != NULL) {
        vTaskDelete(loop->task);
    }

    // Remove all registered events and handlers in the loop
    esp_event_loop_node_t *it, *temp;
    SLIST_FOREACH_SAFE(it, &(loop->loop_nodes), next, temp) {
        loop_node_remove_all_handler(it);
        SLIST_REMOVE(&(loop->loop_nodes), it, esp_event_loop_node, next);
        free(it);
    }

    // Drop existing posts on the queue
    esp_event_post_instance_t post;
    while(xQueueReceive(loop->queue, &post, 0) == pdTRUE) {
        post_instance_delete(&post);
    }

    // Cleanup loop
    vQueueDelete(loop->queue);
    free(loop);

    // Free loop mutex before deleting
    // NOTE(review): the mutex is given (and deleted) only after the loop
    // struct is freed — any task blocked on it will wake to a deleted loop;
    // callers are expected to ensure no concurrent users at delete time.
    xSemaphoreGiveRecursive(loop_mutex);
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    xSemaphoreGive(loop_profiling_mutex);
    vSemaphoreDelete(loop_profiling_mutex);
#endif
    vSemaphoreDelete(loop_mutex);

    ESP_LOGD(TAG, "deleted loop %p", (void*) event_loop);

    return ESP_OK;
}
// Common implementation behind both legacy and instance-based registration.
// Maps ESP_EVENT_ANY_BASE to the internal sentinel, then places the handler
// on an existing loop node or creates a new one. When handler_ctx_arg is
// non-NULL it receives the created instance context (instance API); legacy
// callers pass NULL.
esp_err_t esp_event_handler_register_with_internal(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                                   int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg,
                                                   esp_event_handler_instance_context_t** handler_ctx_arg, bool legacy)
{
    assert(event_loop);
    assert(event_handler);

    // A wildcard base combined with a concrete id has no representation in
    // the registry tree.
    if (event_base == ESP_EVENT_ANY_BASE && event_id != ESP_EVENT_ANY_ID) {
        ESP_LOGE(TAG, "registering to any event base with specific id unsupported");
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    if (event_base == ESP_EVENT_ANY_BASE) {
        event_base = esp_event_any_base;
    }

    esp_err_t err = ESP_OK;

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

    // Find the tail of the loop node list; new nodes are appended there to
    // preserve registration order.
    esp_event_loop_node_t *loop_node = NULL, *last_loop_node = NULL;

    SLIST_FOREACH(loop_node, &(loop->loop_nodes), next) {
        last_loop_node = loop_node;
    }

    bool is_loop_level_handler = (event_base == esp_event_any_base) && (event_id == ESP_EVENT_ANY_ID);

    // A new loop node is needed when none exists yet, or when a loop-level
    // handler would otherwise land on a node that already carries base
    // nodes (dispatch order in esp_event_loop_run depends on this split).
    if (!last_loop_node ||
            (last_loop_node && !SLIST_EMPTY(&(last_loop_node->base_nodes)) && is_loop_level_handler)) {
        loop_node = (esp_event_loop_node_t*) calloc(1, sizeof(*loop_node));

        if (!loop_node) {
            ESP_LOGE(TAG, "alloc for new loop node failed");
            err = ESP_ERR_NO_MEM;
            goto on_err;
        }

        SLIST_INIT(&(loop_node->handlers));
        SLIST_INIT(&(loop_node->base_nodes));

        err = loop_node_add_handler(loop_node, event_base, event_id, event_handler, event_handler_arg, handler_ctx_arg, legacy);

        if (err == ESP_OK) {
            if (!last_loop_node) {
                SLIST_INSERT_HEAD(&(loop->loop_nodes), loop_node, next);
            }
            else {
                SLIST_INSERT_AFTER(last_loop_node, loop_node, next);
            }
        } else {
            // Registration failed; discard the node we just created.
            free(loop_node);
        }
    }
    else {
        err = loop_node_add_handler(last_loop_node, event_base, event_id, event_handler, event_handler_arg, handler_ctx_arg, legacy);
    }

on_err:
    xSemaphoreGiveRecursive(loop->mutex);
    return err;
}
  612. esp_err_t esp_event_handler_register_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
  613. int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg)
  614. {
  615. return esp_event_handler_register_with_internal(event_loop, event_base, event_id, event_handler, event_handler_arg, NULL, true);
  616. }
  617. esp_err_t esp_event_handler_instance_register_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
  618. int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg,
  619. esp_event_handler_instance_t* handler_ctx_arg)
  620. {
  621. return esp_event_handler_register_with_internal(event_loop, event_base, event_id, event_handler, event_handler_arg, (esp_event_handler_instance_context_t**) handler_ctx_arg, false);
  622. }
  623. esp_err_t esp_event_handler_unregister_with_internal(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
  624. int32_t event_id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
  625. {
  626. assert(event_loop);
  627. assert(handler_ctx);
  628. if (event_base == ESP_EVENT_ANY_BASE && event_id != ESP_EVENT_ANY_ID) {
  629. ESP_LOGE(TAG, "unregistering to any event base with specific id unsupported");
  630. return ESP_FAIL;
  631. }
  632. if (event_base == ESP_EVENT_ANY_BASE) {
  633. event_base = esp_event_any_base;
  634. }
  635. esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
  636. xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);
  637. esp_event_loop_node_t *it, *temp;
  638. SLIST_FOREACH_SAFE(it, &(loop->loop_nodes), next, temp) {
  639. esp_err_t res = loop_node_remove_handler(it, event_base, event_id, handler_ctx, legacy);
  640. if (res == ESP_OK && SLIST_EMPTY(&(it->base_nodes)) && SLIST_EMPTY(&(it->handlers))) {
  641. SLIST_REMOVE(&(loop->loop_nodes), it, esp_event_loop_node, next);
  642. free(it);
  643. break;
  644. }
  645. }
  646. xSemaphoreGiveRecursive(loop->mutex);
  647. return ESP_OK;
  648. }
  649. esp_err_t esp_event_handler_unregister_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
  650. int32_t event_id, esp_event_handler_t event_handler)
  651. {
  652. esp_event_handler_instance_context_t local_handler_ctx;
  653. local_handler_ctx.handler = event_handler;
  654. local_handler_ctx.arg = NULL;
  655. return esp_event_handler_unregister_with_internal(event_loop, event_base, event_id, &local_handler_ctx, true);
  656. }
  657. esp_err_t esp_event_handler_instance_unregister_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
  658. int32_t event_id, esp_event_handler_instance_t handler_ctx_arg)
  659. {
  660. if (!handler_ctx_arg) return ESP_ERR_INVALID_ARG;
  661. return esp_event_handler_unregister_with_internal(event_loop, event_base, event_id, (esp_event_handler_instance_context_t*) handler_ctx_arg, false);
  662. }
// Post an event to a loop. Event data (if any) is copied to the heap and
// owned by the post until a handler has run and the loop frees it. When the
// posting task is the one currently running the loop, the send uses a zero
// timeout to avoid deadlocking on a full queue. Returns ESP_ERR_TIMEOUT if
// the queue could not accept the post in time.
esp_err_t esp_event_post_to(esp_event_loop_handle_t event_loop, esp_event_base_t event_base, int32_t event_id,
                            void* event_data, size_t event_data_size, TickType_t ticks_to_wait)
{
    assert(event_loop);

    // Wildcard base/id are registration-only concepts; a post must name a
    // concrete event.
    if (event_base == ESP_EVENT_ANY_BASE || event_id == ESP_EVENT_ANY_ID) {
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    esp_event_post_instance_t post;
    memset((void*)(&post), 0, sizeof(post));

    if (event_data != NULL && event_data_size != 0) {
        // Make persistent copy of event data on heap.
        void* event_data_copy = calloc(1, event_data_size);

        if (event_data_copy == NULL) {
            return ESP_ERR_NO_MEM;
        }

        memcpy(event_data_copy, event_data, event_data_size);
#if CONFIG_ESP_EVENT_POST_FROM_ISR
        post.data.ptr = event_data_copy;
        post.data_allocated = true;
        post.data_set = true;
#else
        post.data = event_data_copy;
#endif
    }
    post.base = event_base;
    post.id = event_id;

    BaseType_t result = pdFALSE;

    // Find the task that currently executes the loop. It is safe to query loop->task since it is
    // not mutated since loop creation. ENSURE THIS REMAINS TRUE.
    if (loop->task == NULL) {
        // The loop has no dedicated task. Find out what task is currently running it.
        result = xSemaphoreTakeRecursive(loop->mutex, ticks_to_wait);

        if (result == pdTRUE) {
            if (loop->running_task != xTaskGetCurrentTaskHandle()) {
                // Some other task runs the loop; a blocking send is safe.
                xSemaphoreGiveRecursive(loop->mutex);
                result = xQueueSendToBack(loop->queue, &post, ticks_to_wait);
            } else {
                // We ARE the loop's current runner: never block on our own
                // queue, or a full queue would deadlock the dispatch.
                xSemaphoreGiveRecursive(loop->mutex);
                result = xQueueSendToBack(loop->queue, &post, 0);
            }
        }
        // NOTE(review): if the mutex take times out, result stays pdFALSE
        // and the failure path below reports ESP_ERR_TIMEOUT — confirm this
        // is the intended outcome for mutex contention.
    } else {
        // The loop has a dedicated task.
        if (loop->task != xTaskGetCurrentTaskHandle()) {
            result = xQueueSendToBack(loop->queue, &post, ticks_to_wait);
        } else {
            // Posting from the loop's own task: zero timeout, same reason
            // as above.
            result = xQueueSendToBack(loop->queue, &post, 0);
        }
    }

    if (result != pdTRUE) {
        // The post never made it onto the queue; release its data copy.
        post_instance_delete(&post);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
        atomic_fetch_add(&loop->events_dropped, 1);
#endif
        return ESP_ERR_TIMEOUT;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    atomic_fetch_add(&loop->events_recieved, 1);
#endif

    return ESP_OK;
}
  725. #if CONFIG_ESP_EVENT_POST_FROM_ISR
  726. esp_err_t esp_event_isr_post_to(esp_event_loop_handle_t event_loop, esp_event_base_t event_base, int32_t event_id,
  727. void* event_data, size_t event_data_size, BaseType_t* task_unblocked)
  728. {
  729. assert(event_loop);
  730. if (event_base == ESP_EVENT_ANY_BASE || event_id == ESP_EVENT_ANY_ID) {
  731. return ESP_ERR_INVALID_ARG;
  732. }
  733. esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
  734. esp_event_post_instance_t post;
  735. memset((void*)(&post), 0, sizeof(post));
  736. if (event_data_size > sizeof(post.data.val)) {
  737. return ESP_ERR_INVALID_ARG;
  738. }
  739. if (event_data != NULL && event_data_size != 0) {
  740. memcpy((void*)(&(post.data.val)), event_data, event_data_size);
  741. post.data_allocated = false;
  742. post.data_set = true;
  743. }
  744. post.base = event_base;
  745. post.id = event_id;
  746. BaseType_t result = pdFALSE;
  747. // Post the event from an ISR,
  748. result = xQueueSendToBackFromISR(loop->queue, &post, task_unblocked);
  749. if (result != pdTRUE) {
  750. post_instance_delete(&post);
  751. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  752. atomic_fetch_add(&loop->events_dropped, 1);
  753. #endif
  754. return ESP_FAIL;
  755. }
  756. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  757. atomic_fetch_add(&loop->events_recieved, 1);
  758. #endif
  759. return ESP_OK;
  760. }
  761. #endif
  762. esp_err_t esp_event_dump(FILE* file)
  763. {
  764. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  765. assert(file);
  766. esp_event_loop_instance_t* loop_it;
  767. esp_event_loop_node_t *loop_node_it;
  768. esp_event_base_node_t* base_node_it;
  769. esp_event_id_node_t* id_node_it;
  770. esp_event_handler_node_t* handler_it;
  771. // Allocate memory for printing
  772. int sz = esp_event_dump_prepare();
  773. char* buf = calloc(sz, sizeof(char));
  774. char* dst = buf;
  775. char id_str_buf[20];
  776. // Print info to buffer
  777. portENTER_CRITICAL(&s_event_loops_spinlock);
  778. SLIST_FOREACH(loop_it, &s_event_loops, next) {
  779. uint32_t events_recieved, events_dropped;
  780. events_recieved = atomic_load(&loop_it->events_recieved);
  781. events_dropped = atomic_load(&loop_it->events_dropped);
  782. PRINT_DUMP_INFO(dst, sz, LOOP_DUMP_FORMAT, loop_it, loop_it->task != NULL ? loop_it->name : "none" ,
  783. events_recieved, events_dropped);
  784. int sz_bak = sz;
  785. SLIST_FOREACH(loop_node_it, &(loop_it->loop_nodes), next) {
  786. SLIST_FOREACH(handler_it, &(loop_node_it->handlers), next) {
  787. PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, "ESP_EVENT_ANY_BASE",
  788. "ESP_EVENT_ANY_ID", handler_it->invoked, handler_it->time);
  789. }
  790. SLIST_FOREACH(base_node_it, &(loop_node_it->base_nodes), next) {
  791. SLIST_FOREACH(handler_it, &(base_node_it->handlers), next) {
  792. PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, base_node_it->base ,
  793. "ESP_EVENT_ANY_ID", handler_it->invoked, handler_it->time);
  794. }
  795. SLIST_FOREACH(id_node_it, &(base_node_it->id_nodes), next) {
  796. SLIST_FOREACH(handler_it, &(id_node_it->handlers), next) {
  797. memset(id_str_buf, 0, sizeof(id_str_buf));
  798. snprintf(id_str_buf, sizeof(id_str_buf), "%d", id_node_it->id);
  799. PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, base_node_it->base ,
  800. id_str_buf, handler_it->invoked, handler_it->time);
  801. }
  802. }
  803. }
  804. }
  805. // No handlers registered for this loop
  806. if (sz == sz_bak) {
  807. PRINT_DUMP_INFO(dst, sz, " NO HANDLERS REGISTERED\n");
  808. }
  809. }
  810. portEXIT_CRITICAL(&s_event_loops_spinlock);
  811. // Print the contents of the buffer to the file
  812. fprintf(file, buf);
  813. // Free the allocated buffer
  814. free(buf);
  815. #endif
  816. return ESP_OK;
  817. }