esp_event.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013
  1. /*
  2. * SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <stdlib.h>
  7. #include <string.h>
  8. #include <stdio.h>
  9. #include <stdbool.h>
  10. #include "esp_log.h"
  11. #include "esp_event.h"
  12. #include "esp_event_internal.h"
  13. #include "esp_event_private.h"
  14. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  15. #include "esp_timer.h"
  16. #endif
/* ---------------------------- Definitions --------------------------------- */

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
// LOOP @<address, name> rx:<received events no.> dr:<dropped events no.>
#define LOOP_DUMP_FORMAT "LOOP @%p,%s rx:%u dr:%u\n"
// handler @<address> ev:<base, id> inv:<times invoked> time:<runtime>
#define HANDLER_DUMP_FORMAT " HANDLER @%p ev:%s,%s inv:%u time:%lld us\n"

// Append formatted text at cursor 'dst' and advance dst/sz by the number of
// characters snprintf reports.
// NOTE(review): the expansion ends in `while(0);` — a trailing semicolon —
// so call sites are written without their own semicolon; confirm all call
// sites before normalizing this to the conventional `while(0)`.
#define PRINT_DUMP_INFO(dst, sz, ...) do { \
        int cb = snprintf(dst, sz, __VA_ARGS__); \
        dst += cb; \
        sz -= cb; \
    } while(0);
#endif
/* ------------------------- Static Variables ------------------------------- */

// Log tag for this translation unit.
static const char* TAG = "event";
// Internal sentinel base that ESP_EVENT_ANY_BASE registrations are mapped to.
static const char* esp_event_any_base = "any";

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
// Global registry of all live event loops, maintained solely so that
// esp_event_dump()/esp_event_dump_prepare() can walk them.
static SLIST_HEAD(esp_event_loop_instance_list_t, esp_event_loop_instance) s_event_loops =
        SLIST_HEAD_INITIALIZER(s_event_loops);

// Guards insertions/removals on s_event_loops.
static portMUX_TYPE s_event_loops_spinlock = portMUX_INITIALIZER_UNLOCKED;
#endif
  37. /* ------------------------- Static Functions ------------------------------- */
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
// Compute an upper-bound buffer size (bytes) for the esp_event_dump() output
// by counting every loop and every registered handler at all three levels
// (loop, base, id).
static int esp_event_dump_prepare(void)
{
    esp_event_loop_instance_t* loop_it;
    esp_event_loop_node_t *loop_node_it;
    esp_event_base_node_t* base_node_it;
    esp_event_id_node_t* id_node_it;
    esp_event_handler_node_t* handler_it;

    // Count the number of items to be printed. This is needed to compute how much memory to reserve.
    int loops = 0, handlers = 0;

    portENTER_CRITICAL(&s_event_loops_spinlock);

    SLIST_FOREACH(loop_it, &s_event_loops, next) {
        SLIST_FOREACH(loop_node_it, &(loop_it->loop_nodes), next) {
            // Loop-level handlers (any base, any id)
            SLIST_FOREACH(handler_it, &(loop_node_it->handlers), next) {
                handlers++;
            }
            SLIST_FOREACH(base_node_it, &(loop_node_it->base_nodes), next) {
                // Base-level handlers (specific base, any id)
                SLIST_FOREACH(handler_it, &(base_node_it->handlers), next) {
                    handlers++;
                }
                SLIST_FOREACH(id_node_it, &(base_node_it->id_nodes), next) {
                    // Id-level handlers (specific base and id)
                    SLIST_FOREACH(handler_it, &(id_node_it->handlers), next) {
                        handlers++;
                    }
                }
            }
        }
        loops++;
    }

    portEXIT_CRITICAL(&s_event_loops_spinlock);

    // Reserve slightly more memory than computed. The magic constants are
    // presumably worst-case printed widths of the %p/%s/%u/%lld fields in
    // the dump format strings — TODO(review): confirm against the formats.
    int allowance = 3;
    int size = (((loops + allowance) * (sizeof(LOOP_DUMP_FORMAT) + 10 + 20 + 2 * 11)) +
                ((handlers + allowance) * (sizeof(HANDLER_DUMP_FORMAT) + 10 + 2 * 20 + 11 + 20)));

    return size;
}
#endif
  75. static void esp_event_loop_run_task(void* args)
  76. {
  77. esp_err_t err;
  78. esp_event_loop_handle_t event_loop = (esp_event_loop_handle_t) args;
  79. ESP_LOGD(TAG, "running task for loop %p", event_loop);
  80. while(1) {
  81. err = esp_event_loop_run(event_loop, portMAX_DELAY);
  82. if (err != ESP_OK) {
  83. break;
  84. }
  85. }
  86. ESP_LOGE(TAG, "suspended task for loop %p", event_loop);
  87. vTaskSuspend(NULL);
  88. }
// Invoke one registered handler for the given post, accumulating per-handler
// profiling statistics when CONFIG_ESP_EVENT_LOOP_PROFILING is enabled.
static void handler_execute(esp_event_loop_instance_t* loop, esp_event_handler_node_t *handler, esp_event_post_instance_t post)
{
    ESP_LOGD(TAG, "running post %s:%d with handler %p and context %p on loop %p", post.base, post.id, handler->handler_ctx->handler, &handler->handler_ctx, loop);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    int64_t start, diff;
    start = esp_timer_get_time();
#endif
    // Execute the handler
#if CONFIG_ESP_EVENT_POST_FROM_ISR
    // When ISR posting is enabled a post carries either heap data (data.ptr)
    // or a small value stored inline (data.val); pass the matching pointer.
    void* data_ptr = NULL;

    if (post.data_set) {
        if (post.data_allocated) {
            data_ptr = post.data.ptr;
        } else {
            data_ptr = &post.data.val;
        }
    }

    (*(handler->handler_ctx->handler))(handler->handler_ctx->arg, post.base, post.id, data_ptr);
#else
    (*(handler->handler_ctx->handler))(handler->handler_ctx->arg, post.base, post.id, post.data);
#endif

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    diff = esp_timer_get_time() - start;

    // invoked/time are also read by esp_event_dump(); serialize access with
    // the loop's profiling mutex.
    xSemaphoreTake(loop->profiling_mutex, portMAX_DELAY);
    handler->invoked++;
    handler->time += diff;
    xSemaphoreGive(loop->profiling_mutex);
#endif
}
// Allocate a handler node plus its instance context and append it to
// 'handlers' (tail insertion, so handlers run in registration order).
//
// In legacy mode, re-registering the same handler function does not add a
// second node: the stored argument is overwritten in place and a warning is
// logged.
//
// On success *handler_ctx (if non-NULL) receives the new instance context,
// which the caller later uses to unregister. Returns ESP_OK or
// ESP_ERR_NO_MEM.
static esp_err_t handler_instances_add(esp_event_handler_nodes_t* handlers, esp_event_handler_t event_handler, void* event_handler_arg, esp_event_handler_instance_context_t **handler_ctx, bool legacy)
{
    esp_event_handler_node_t *handler_instance = calloc(1, sizeof(*handler_instance));

    if (!handler_instance) return ESP_ERR_NO_MEM;

    esp_event_handler_instance_context_t *context = calloc(1, sizeof(*context));

    if (!context) {
        free(handler_instance);
        return ESP_ERR_NO_MEM;
    }

    context->handler = event_handler;
    context->arg = event_handler_arg;
    handler_instance->handler_ctx = context;

    if (SLIST_EMPTY(handlers)) {
        SLIST_INSERT_HEAD(handlers, handler_instance, next);
    }
    else {
        // Walk to the tail; in legacy mode also look for a duplicate of the
        // same handler function along the way.
        esp_event_handler_node_t *it = NULL, *last = NULL;

        SLIST_FOREACH(it, handlers, next) {
            if (legacy) {
                if(event_handler == it->handler_ctx->handler) {
                    it->handler_ctx->arg = event_handler_arg;
                    ESP_LOGW(TAG, "handler already registered, overwriting");
                    free(handler_instance);
                    free(context);
                    return ESP_OK;
                }
            }
            last = it;
        }

        SLIST_INSERT_AFTER(last, handler_instance, next);
    }

    // If the caller didn't provide the handler instance context, don't set it.
    // It will be removed once the event loop is deleted.
    if (handler_ctx) {
        *handler_ctx = context;
    }

    return ESP_OK;
}
// Register a handler under 'base_node': at base level when
// id == ESP_EVENT_ANY_ID, otherwise under the id node matching 'id',
// creating that id node first if it does not exist yet.
static esp_err_t base_node_add_handler(esp_event_base_node_t* base_node,
                                       int32_t id,
                                       esp_event_handler_t event_handler,
                                       void *event_handler_arg,
                                       esp_event_handler_instance_context_t **handler_ctx,
                                       bool legacy)
{
    if (id == ESP_EVENT_ANY_ID) {
        return handler_instances_add(&(base_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
    }
    else {
        esp_err_t err = ESP_OK;
        esp_event_id_node_t *it = NULL, *id_node = NULL, *last_id_node = NULL;

        // Look for an existing node for this id while remembering the list
        // tail, which is the insertion point for a new node.
        SLIST_FOREACH(it, &(base_node->id_nodes), next) {
            if (it->id == id) {
                id_node = it;
            }
            last_id_node = it;
        }

        if (!last_id_node || !id_node) {
            // No node for this id yet: create one, add the handler to it,
            // then link it at the head (empty list) or after the tail.
            id_node = (esp_event_id_node_t*) calloc(1, sizeof(*id_node));

            if (!id_node) {
                ESP_LOGE(TAG, "alloc for new id node failed");
                return ESP_ERR_NO_MEM;
            }

            id_node->id = id;

            SLIST_INIT(&(id_node->handlers));

            err = handler_instances_add(&(id_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);

            if (err == ESP_OK) {
                if (!last_id_node) {
                    SLIST_INSERT_HEAD(&(base_node->id_nodes), id_node, next);
                }
                else {
                    SLIST_INSERT_AFTER(last_id_node, id_node, next);
                }
            } else {
                // Handler could not be added; discard the freshly created node.
                free(id_node);
            }

            return err;
        }
        else {
            return handler_instances_add(&(id_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
        }
    }
}
// Register a handler under 'loop_node': at loop level when both base and id
// are wildcards, otherwise under a base node (delegating to
// base_node_add_handler for base/id-level placement).
static esp_err_t loop_node_add_handler(esp_event_loop_node_t* loop_node,
                                       esp_event_base_t base,
                                       int32_t id,
                                       esp_event_handler_t event_handler,
                                       void *event_handler_arg,
                                       esp_event_handler_instance_context_t **handler_ctx,
                                       bool legacy)
{
    if (base == esp_event_any_base && id == ESP_EVENT_ANY_ID) {
        return handler_instances_add(&(loop_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
    }
    else {
        esp_err_t err = ESP_OK;
        esp_event_base_node_t *it = NULL, *base_node = NULL, *last_base_node = NULL;

        // Find an existing node for this base while remembering the tail.
        SLIST_FOREACH(it, &(loop_node->base_nodes), next) {
            if (it->base == base) {
                base_node = it;
            }
            last_base_node = it;
        }

        // A new base node is created when none exists for this base, or when
        // a base-level (ANY_ID) registration would land on a node that
        // already carries id nodes. NOTE(review): the latter presumably
        // preserves registration/dispatch ordering — confirm before
        // simplifying this condition.
        if (!last_base_node ||
                !base_node ||
                (base_node && !SLIST_EMPTY(&(base_node->id_nodes)) && id == ESP_EVENT_ANY_ID) ||
                (last_base_node && last_base_node->base != base && !SLIST_EMPTY(&(last_base_node->id_nodes)) && id == ESP_EVENT_ANY_ID)) {
            base_node = (esp_event_base_node_t*) calloc(1, sizeof(*base_node));

            if (!base_node) {
                ESP_LOGE(TAG, "alloc mem for new base node failed");
                return ESP_ERR_NO_MEM;
            }

            base_node->base = base;

            SLIST_INIT(&(base_node->handlers));
            SLIST_INIT(&(base_node->id_nodes));

            err = base_node_add_handler(base_node, id, event_handler, event_handler_arg, handler_ctx, legacy);

            if (err == ESP_OK) {
                if (!last_base_node) {
                    SLIST_INSERT_HEAD(&(loop_node->base_nodes), base_node, next);
                }
                else {
                    SLIST_INSERT_AFTER(last_base_node, base_node, next);
                }
            } else {
                // Handler could not be added; discard the freshly created node.
                free(base_node);
            }

            return err;
        } else {
            return base_node_add_handler(base_node, id, event_handler, event_handler_arg, handler_ctx, legacy);
        }
    }
}
  250. static esp_err_t handler_instances_remove(esp_event_handler_nodes_t* handlers, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
  251. {
  252. esp_event_handler_node_t *it, *temp;
  253. SLIST_FOREACH_SAFE(it, handlers, next, temp) {
  254. if (legacy) {
  255. if (it->handler_ctx->handler == handler_ctx->handler) {
  256. SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
  257. free(it->handler_ctx);
  258. free(it);
  259. return ESP_OK;
  260. }
  261. } else {
  262. if (it->handler_ctx == handler_ctx) {
  263. SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
  264. free(it->handler_ctx);
  265. free(it);
  266. return ESP_OK;
  267. }
  268. }
  269. }
  270. return ESP_ERR_NOT_FOUND;
  271. }
  272. static esp_err_t base_node_remove_handler(esp_event_base_node_t* base_node, int32_t id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
  273. {
  274. if (id == ESP_EVENT_ANY_ID) {
  275. return handler_instances_remove(&(base_node->handlers), handler_ctx, legacy);
  276. }
  277. else {
  278. esp_event_id_node_t *it, *temp;
  279. SLIST_FOREACH_SAFE(it, &(base_node->id_nodes), next, temp) {
  280. if (it->id == id) {
  281. esp_err_t res = handler_instances_remove(&(it->handlers), handler_ctx, legacy);
  282. if (res == ESP_OK) {
  283. if (SLIST_EMPTY(&(it->handlers))) {
  284. SLIST_REMOVE(&(base_node->id_nodes), it, esp_event_id_node, next);
  285. free(it);
  286. return ESP_OK;
  287. }
  288. }
  289. }
  290. }
  291. }
  292. return ESP_ERR_NOT_FOUND;
  293. }
  294. static esp_err_t loop_node_remove_handler(esp_event_loop_node_t* loop_node, esp_event_base_t base, int32_t id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
  295. {
  296. if (base == esp_event_any_base && id == ESP_EVENT_ANY_ID) {
  297. return handler_instances_remove(&(loop_node->handlers), handler_ctx, legacy);
  298. }
  299. else {
  300. esp_event_base_node_t *it, *temp;
  301. SLIST_FOREACH_SAFE(it, &(loop_node->base_nodes), next, temp) {
  302. if (it->base == base) {
  303. esp_err_t res = base_node_remove_handler(it, id, handler_ctx, legacy);
  304. if (res == ESP_OK) {
  305. if (SLIST_EMPTY(&(it->handlers)) && SLIST_EMPTY(&(it->id_nodes))) {
  306. SLIST_REMOVE(&(loop_node->base_nodes), it, esp_event_base_node, next);
  307. free(it);
  308. return ESP_OK;
  309. }
  310. }
  311. }
  312. }
  313. }
  314. return ESP_ERR_NOT_FOUND;
  315. }
  316. static void handler_instances_remove_all(esp_event_handler_nodes_t* handlers)
  317. {
  318. esp_event_handler_node_t *it, *temp;
  319. SLIST_FOREACH_SAFE(it, handlers, next, temp) {
  320. SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
  321. free(it->handler_ctx);
  322. free(it);
  323. }
  324. }
  325. static void base_node_remove_all_handler(esp_event_base_node_t* base_node)
  326. {
  327. handler_instances_remove_all(&(base_node->handlers));
  328. esp_event_id_node_t *it, *temp;
  329. SLIST_FOREACH_SAFE(it, &(base_node->id_nodes), next, temp) {
  330. handler_instances_remove_all(&(it->handlers));
  331. SLIST_REMOVE(&(base_node->id_nodes), it, esp_event_id_node, next);
  332. free(it);
  333. }
  334. }
  335. static void loop_node_remove_all_handler(esp_event_loop_node_t* loop_node)
  336. {
  337. handler_instances_remove_all(&(loop_node->handlers));
  338. esp_event_base_node_t *it, *temp;
  339. SLIST_FOREACH_SAFE(it, &(loop_node->base_nodes), next, temp) {
  340. base_node_remove_all_handler(it);
  341. SLIST_REMOVE(&(loop_node->base_nodes), it, esp_event_base_node, next);
  342. free(it);
  343. }
  344. }
  345. static void inline __attribute__((always_inline)) post_instance_delete(esp_event_post_instance_t* post)
  346. {
  347. #if CONFIG_ESP_EVENT_POST_FROM_ISR
  348. if (post->data_allocated && post->data.ptr) {
  349. free(post->data.ptr);
  350. }
  351. #else
  352. if (post->data) {
  353. free(post->data);
  354. }
  355. #endif
  356. memset(post, 0, sizeof(*post));
  357. }
  358. /* ---------------------------- Public API --------------------------------- */
// Create an event loop: allocates the loop instance, its queue and mutexes,
// and optionally a dedicated dispatcher task (when task_name is set).
// On any partial failure the on_err path tears down whatever was created.
// Returns ESP_OK, ESP_ERR_INVALID_ARG, ESP_ERR_NO_MEM, or ESP_FAIL (task
// creation failure).
esp_err_t esp_event_loop_create(const esp_event_loop_args_t* event_loop_args, esp_event_loop_handle_t* event_loop)
{
    if (event_loop_args == NULL) {
        ESP_LOGE(TAG, "event_loop_args was NULL");
        return ESP_ERR_INVALID_ARG;
    }

    if (event_loop == NULL) {
        ESP_LOGE(TAG, "event_loop was NULL");
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop;
    esp_err_t err = ESP_ERR_NO_MEM; // most likely error

    loop = calloc(1, sizeof(*loop));
    if (loop == NULL) {
        ESP_LOGE(TAG, "alloc for event loop failed");
        return err;
    }

    loop->queue = xQueueCreate(event_loop_args->queue_size , sizeof(esp_event_post_instance_t));
    if (loop->queue == NULL) {
        ESP_LOGE(TAG, "create event loop queue failed");
        goto on_err;
    }

    // Recursive: esp_event_loop_run and the (un)register paths may nest.
    loop->mutex = xSemaphoreCreateRecursiveMutex();
    if (loop->mutex == NULL) {
        ESP_LOGE(TAG, "create event loop mutex failed");
        goto on_err;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    loop->profiling_mutex = xSemaphoreCreateMutex();
    if (loop->profiling_mutex == NULL) {
        ESP_LOGE(TAG, "create event loop profiling mutex failed");
        goto on_err;
    }
#endif

    SLIST_INIT(&(loop->loop_nodes));

    // Create the loop task if requested
    if (event_loop_args->task_name != NULL) {
        BaseType_t task_created = xTaskCreatePinnedToCore(esp_event_loop_run_task, event_loop_args->task_name,
                                                          event_loop_args->task_stack_size, (void*) loop,
                                                          event_loop_args->task_priority, &(loop->task), event_loop_args->task_core_id);

        if (task_created != pdPASS) {
            ESP_LOGE(TAG, "create task for loop failed");
            err = ESP_FAIL;
            goto on_err;
        }

        loop->name = event_loop_args->task_name;

        ESP_LOGD(TAG, "created task for loop %p", loop);
    } else {
        loop->name = "";
        loop->task = NULL;
    }

    loop->running_task = NULL;

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    // Register the loop so esp_event_dump() can find it.
    portENTER_CRITICAL(&s_event_loops_spinlock);
    SLIST_INSERT_HEAD(&s_event_loops, loop, next);
    portEXIT_CRITICAL(&s_event_loops_spinlock);
#endif

    *event_loop = (esp_event_loop_handle_t) loop;

    ESP_LOGD(TAG, "created event loop %p", loop);

    return ESP_OK;

on_err:
    // Partial-construction cleanup: only resources actually created are
    // released (calloc zeroed the handles).
    if (loop->queue != NULL) {
        vQueueDelete(loop->queue);
    }

    if (loop->mutex != NULL) {
        vSemaphoreDelete(loop->mutex);
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    if (loop->profiling_mutex != NULL) {
        vSemaphoreDelete(loop->profiling_mutex);
    }
#endif

    free(loop);

    return err;
}
// On event lookup performance: The library implements the event list as a linked list, which results in O(n)
// lookup time. A test comparing this implementation to the O(lg n) performance of rbtrees
// (https://github.com/freebsd/freebsd/blob/master/sys/sys/tree.h)
// indicates that the difference is not that substantial, especially considering the additional
// pointers per node that rbtrees require. Code for the rbtree implementation of the event loop library is archived
// in feature/esp_event_loop_library_rbtrees if needed.
// Dispatch events queued on 'event_loop' for at most 'ticks_to_run' ticks.
// For each dequeued post, every matching handler is invoked in order:
// loop-level handlers first, then base-level, then id-level.
// The loop mutex is held across each post's dispatch so unregistration
// cannot race handler execution.
esp_err_t esp_event_loop_run(esp_event_loop_handle_t event_loop, TickType_t ticks_to_run)
{
    assert(event_loop);

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
    esp_event_post_instance_t post;
    TickType_t marker = xTaskGetTickCount();
    TickType_t end = 0;

    // Wider signed type so the remaining-ticks subtraction can go negative.
#if (configUSE_16_BIT_TICKS == 1)
    int32_t remaining_ticks = ticks_to_run;
#else
    int64_t remaining_ticks = ticks_to_run;
#endif

    while(xQueueReceive(loop->queue, &post, ticks_to_run) == pdTRUE) {
        // The event has already been unqueued, so ensure it gets executed.
        xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

        // Record which task is dispatching; esp_event_post_to uses this to
        // avoid blocking when posting from inside a handler.
        loop->running_task = xTaskGetCurrentTaskHandle();

        bool exec = false;

        esp_event_handler_node_t *handler, *temp_handler;
        esp_event_loop_node_t *loop_node, *temp_node;
        esp_event_base_node_t *base_node, *temp_base;
        esp_event_id_node_t *id_node, *temp_id_node;

        SLIST_FOREACH_SAFE(loop_node, &(loop->loop_nodes), next, temp_node) {
            // Execute loop level handlers
            SLIST_FOREACH_SAFE(handler, &(loop_node->handlers), next, temp_handler) {
                handler_execute(loop, handler, post);
                exec |= true;
            }

            SLIST_FOREACH_SAFE(base_node, &(loop_node->base_nodes), next, temp_base) {
                if (base_node->base == post.base) {
                    // Execute base level handlers
                    SLIST_FOREACH_SAFE(handler, &(base_node->handlers), next, temp_handler) {
                        handler_execute(loop, handler, post);
                        exec |= true;
                    }

                    SLIST_FOREACH_SAFE(id_node, &(base_node->id_nodes), next, temp_id_node) {
                        if (id_node->id == post.id) {
                            // Execute id level handlers
                            SLIST_FOREACH_SAFE(handler, &(id_node->handlers), next, temp_handler) {
                                handler_execute(loop, handler, post);
                                exec |= true;
                            }
                            // Skip to next base node
                            break;
                        }
                    }
                }
            }
        }

        // Keep base/id for the "no handlers" diagnostic; the post's data is
        // freed now that all handlers have run.
        esp_event_base_t base = post.base;
        int32_t id = post.id;

        post_instance_delete(&post);

        if (ticks_to_run != portMAX_DELAY) {
            end = xTaskGetTickCount();
            remaining_ticks -= end - marker;
            // If the ticks to run expired, return to the caller
            if (remaining_ticks <= 0) {
                xSemaphoreGiveRecursive(loop->mutex);
                break;
            } else {
                marker = end;
            }
        }

        loop->running_task = NULL;

        xSemaphoreGiveRecursive(loop->mutex);

        if (!exec) {
            // No handlers were registered, not even loop/base level handlers
            ESP_LOGD(TAG, "no handlers have been registered for event %s:%d posted to loop %p", base, id, event_loop);
        }
    }

    return ESP_OK;
}
// Delete an event loop: stops its task (if any), removes all registered
// handlers, drops any undelivered posts, and frees all owned resources.
// The mutex handles are saved locally because 'loop' is freed before the
// mutexes are released and deleted.
esp_err_t esp_event_loop_delete(esp_event_loop_handle_t event_loop)
{
    assert(event_loop);

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
    SemaphoreHandle_t loop_mutex = loop->mutex;
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    SemaphoreHandle_t loop_profiling_mutex = loop->profiling_mutex;
#endif

    // Block out dispatchers and profiling readers for the duration.
    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    xSemaphoreTake(loop->profiling_mutex, portMAX_DELAY);
    // Unregister the loop from the global dump registry.
    portENTER_CRITICAL(&s_event_loops_spinlock);
    SLIST_REMOVE(&s_event_loops, loop, esp_event_loop_instance, next);
    portEXIT_CRITICAL(&s_event_loops_spinlock);
#endif

    // Delete the task if it was created
    if (loop->task != NULL) {
        vTaskDelete(loop->task);
    }

    // Remove all registered events and handlers in the loop
    esp_event_loop_node_t *it, *temp;
    SLIST_FOREACH_SAFE(it, &(loop->loop_nodes), next, temp) {
        loop_node_remove_all_handler(it);
        SLIST_REMOVE(&(loop->loop_nodes), it, esp_event_loop_node, next);
        free(it);
    }

    // Drop existing posts on the queue
    esp_event_post_instance_t post;
    while(xQueueReceive(loop->queue, &post, 0) == pdTRUE) {
        post_instance_delete(&post);
    }

    // Cleanup loop
    vQueueDelete(loop->queue);
    free(loop);

    // Free loop mutex before deleting
    xSemaphoreGiveRecursive(loop_mutex);
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    xSemaphoreGive(loop_profiling_mutex);
    vSemaphoreDelete(loop_profiling_mutex);
#endif
    vSemaphoreDelete(loop_mutex);

    ESP_LOGD(TAG, "deleted loop %p", (void*) event_loop);

    return ESP_OK;
}
// Shared implementation behind the legacy and instance registration APIs.
// Maps ESP_EVENT_ANY_BASE to the internal sentinel base, then places the
// handler into the loop's node hierarchy under the loop mutex.
// Registering ANY_BASE with a specific id is rejected.
esp_err_t esp_event_handler_register_with_internal(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                                   int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg,
                                                   esp_event_handler_instance_context_t** handler_ctx_arg, bool legacy)
{
    assert(event_loop);
    assert(event_handler);

    if (event_base == ESP_EVENT_ANY_BASE && event_id != ESP_EVENT_ANY_ID) {
        ESP_LOGE(TAG, "registering to any event base with specific id unsupported");
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    if (event_base == ESP_EVENT_ANY_BASE) {
        event_base = esp_event_any_base;
    }

    esp_err_t err = ESP_OK;

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

    // Find the tail loop node; new registrations go to the last node unless
    // a new node must be created (see condition below).
    esp_event_loop_node_t *loop_node = NULL, *last_loop_node = NULL;

    SLIST_FOREACH(loop_node, &(loop->loop_nodes), next) {
        last_loop_node = loop_node;
    }

    bool is_loop_level_handler = (event_base == esp_event_any_base) && (event_id == ESP_EVENT_ANY_ID);

    // Create a new loop node when none exists yet, or when a loop-level
    // handler would otherwise land on a node that already has base nodes.
    if (!last_loop_node ||
            (last_loop_node && !SLIST_EMPTY(&(last_loop_node->base_nodes)) && is_loop_level_handler)) {
        loop_node = (esp_event_loop_node_t*) calloc(1, sizeof(*loop_node));

        if (!loop_node) {
            ESP_LOGE(TAG, "alloc for new loop node failed");
            err = ESP_ERR_NO_MEM;
            goto on_err;
        }

        SLIST_INIT(&(loop_node->handlers));
        SLIST_INIT(&(loop_node->base_nodes));

        err = loop_node_add_handler(loop_node, event_base, event_id, event_handler, event_handler_arg, handler_ctx_arg, legacy);

        if (err == ESP_OK) {
            if (!last_loop_node) {
                SLIST_INSERT_HEAD(&(loop->loop_nodes), loop_node, next);
            }
            else {
                SLIST_INSERT_AFTER(last_loop_node, loop_node, next);
            }
        } else {
            free(loop_node);
        }
    }
    else {
        err = loop_node_add_handler(last_loop_node, event_base, event_id, event_handler, event_handler_arg, handler_ctx_arg, legacy);
    }

on_err:
    xSemaphoreGiveRecursive(loop->mutex);
    return err;
}
// Legacy registration API: no instance context is returned, and duplicate
// registrations of the same handler function overwrite the stored argument.
esp_err_t esp_event_handler_register_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                          int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg)
{
    return esp_event_handler_register_with_internal(event_loop, event_base, event_id, event_handler, event_handler_arg, NULL, true);
}
// Instance registration API: on success *handler_ctx_arg receives an opaque
// context that must be passed to esp_event_handler_instance_unregister_with.
esp_err_t esp_event_handler_instance_register_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                                   int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg,
                                                   esp_event_handler_instance_t* handler_ctx_arg)
{
    return esp_event_handler_register_with_internal(event_loop, event_base, event_id, event_handler, event_handler_arg, (esp_event_handler_instance_context_t**) handler_ctx_arg, false);
}
// Shared implementation behind the legacy and instance unregistration APIs.
// Walks the loop nodes, removes the matching handler, and prunes any loop
// node left completely empty. Returns ESP_OK even when no matching handler
// was found (only the ANY_BASE + specific-id combination is rejected).
esp_err_t esp_event_handler_unregister_with_internal(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                                     int32_t event_id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
{
    assert(event_loop);
    assert(handler_ctx);

    if (event_base == ESP_EVENT_ANY_BASE && event_id != ESP_EVENT_ANY_ID) {
        ESP_LOGE(TAG, "unregistering to any event base with specific id unsupported");
        return ESP_FAIL;
    }

    if (event_base == ESP_EVENT_ANY_BASE) {
        event_base = esp_event_any_base;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

    esp_event_loop_node_t *it, *temp;

    SLIST_FOREACH_SAFE(it, &(loop->loop_nodes), next, temp) {
        esp_err_t res = loop_node_remove_handler(it, event_base, event_id, handler_ctx, legacy);

        // A handler matches at most one loop node; once removed, prune the
        // node if it is now empty and stop searching.
        if (res == ESP_OK && SLIST_EMPTY(&(it->base_nodes)) && SLIST_EMPTY(&(it->handlers))) {
            SLIST_REMOVE(&(loop->loop_nodes), it, esp_event_loop_node, next);
            free(it);
            break;
        }
    }

    xSemaphoreGiveRecursive(loop->mutex);

    return ESP_OK;
}
  642. esp_err_t esp_event_handler_unregister_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
  643. int32_t event_id, esp_event_handler_t event_handler)
  644. {
  645. esp_event_handler_instance_context_t local_handler_ctx;
  646. local_handler_ctx.handler = event_handler;
  647. local_handler_ctx.arg = NULL;
  648. return esp_event_handler_unregister_with_internal(event_loop, event_base, event_id, &local_handler_ctx, true);
  649. }
  650. esp_err_t esp_event_handler_instance_unregister_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
  651. int32_t event_id, esp_event_handler_instance_t handler_ctx_arg)
  652. {
  653. if (!handler_ctx_arg) return ESP_ERR_INVALID_ARG;
  654. return esp_event_handler_unregister_with_internal(event_loop, event_base, event_id, (esp_event_handler_instance_context_t*) handler_ctx_arg, false);
  655. }
// Post an event to a loop's queue from task context. event_data (if any) is
// copied to the heap; ownership of the copy transfers to the queue and it is
// freed by the dispatcher (or here on failure). Posting from within the task
// currently running the loop never blocks, to avoid self-deadlock on a full
// queue. Returns ESP_OK, ESP_ERR_INVALID_ARG, ESP_ERR_NO_MEM, or
// ESP_ERR_TIMEOUT when the queue stayed full.
esp_err_t esp_event_post_to(esp_event_loop_handle_t event_loop, esp_event_base_t event_base, int32_t event_id,
                            const void* event_data, size_t event_data_size, TickType_t ticks_to_wait)
{
    assert(event_loop);

    // Wildcards are registration-only concepts; posts must be concrete.
    if (event_base == ESP_EVENT_ANY_BASE || event_id == ESP_EVENT_ANY_ID) {
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    esp_event_post_instance_t post;
    memset((void*)(&post), 0, sizeof(post));

    if (event_data != NULL && event_data_size != 0) {
        // Make persistent copy of event data on heap.
        void* event_data_copy = calloc(1, event_data_size);

        if (event_data_copy == NULL) {
            return ESP_ERR_NO_MEM;
        }

        memcpy(event_data_copy, event_data, event_data_size);
#if CONFIG_ESP_EVENT_POST_FROM_ISR
        post.data.ptr = event_data_copy;
        post.data_allocated = true;
        post.data_set = true;
#else
        post.data = event_data_copy;
#endif
    }
    post.base = event_base;
    post.id = event_id;

    BaseType_t result = pdFALSE;

    // Find the task that currently executes the loop. It is safe to query loop->task since it is
    // not mutated since loop creation. ENSURE THIS REMAINS TRUE.
    if (loop->task == NULL) {
        // The loop has no dedicated task. Find out what task is currently running it.
        result = xSemaphoreTakeRecursive(loop->mutex, ticks_to_wait);

        if (result == pdTRUE) {
            if (loop->running_task != xTaskGetCurrentTaskHandle()) {
                // Different task: safe to block up to ticks_to_wait.
                xSemaphoreGiveRecursive(loop->mutex);
                result = xQueueSendToBack(loop->queue, &post, ticks_to_wait);
            } else {
                // Posting from inside a handler of this very loop: don't
                // block, or a full queue would deadlock the dispatcher.
                xSemaphoreGiveRecursive(loop->mutex);
                result = xQueueSendToBack(loop->queue, &post, 0);
            }
        }
    } else {
        // The loop has a dedicated task.
        if (loop->task != xTaskGetCurrentTaskHandle()) {
            result = xQueueSendToBack(loop->queue, &post, ticks_to_wait);
        } else {
            // Same no-block rule when posting from the loop's own task.
            result = xQueueSendToBack(loop->queue, &post, 0);
        }
    }

    if (result != pdTRUE) {
        // Queue full: reclaim the heap copy before reporting the timeout.
        post_instance_delete(&post);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
        atomic_fetch_add(&loop->events_dropped, 1);
#endif
        return ESP_ERR_TIMEOUT;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    atomic_fetch_add(&loop->events_recieved, 1);
#endif

    return ESP_OK;
}
  718. #if CONFIG_ESP_EVENT_POST_FROM_ISR
  719. esp_err_t esp_event_isr_post_to(esp_event_loop_handle_t event_loop, esp_event_base_t event_base, int32_t event_id,
  720. const void* event_data, size_t event_data_size, BaseType_t* task_unblocked)
  721. {
  722. assert(event_loop);
  723. if (event_base == ESP_EVENT_ANY_BASE || event_id == ESP_EVENT_ANY_ID) {
  724. return ESP_ERR_INVALID_ARG;
  725. }
  726. esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
  727. esp_event_post_instance_t post;
  728. memset((void*)(&post), 0, sizeof(post));
  729. if (event_data_size > sizeof(post.data.val)) {
  730. return ESP_ERR_INVALID_ARG;
  731. }
  732. if (event_data != NULL && event_data_size != 0) {
  733. memcpy((void*)(&(post.data.val)), event_data, event_data_size);
  734. post.data_allocated = false;
  735. post.data_set = true;
  736. }
  737. post.base = event_base;
  738. post.id = event_id;
  739. BaseType_t result = pdFALSE;
  740. // Post the event from an ISR,
  741. result = xQueueSendToBackFromISR(loop->queue, &post, task_unblocked);
  742. if (result != pdTRUE) {
  743. post_instance_delete(&post);
  744. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  745. atomic_fetch_add(&loop->events_dropped, 1);
  746. #endif
  747. return ESP_FAIL;
  748. }
  749. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  750. atomic_fetch_add(&loop->events_recieved, 1);
  751. #endif
  752. return ESP_OK;
  753. }
  754. #endif
  755. esp_err_t esp_event_dump(FILE* file)
  756. {
  757. #ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
  758. assert(file);
  759. esp_event_loop_instance_t* loop_it;
  760. esp_event_loop_node_t *loop_node_it;
  761. esp_event_base_node_t* base_node_it;
  762. esp_event_id_node_t* id_node_it;
  763. esp_event_handler_node_t* handler_it;
  764. // Allocate memory for printing
  765. int sz = esp_event_dump_prepare();
  766. char* buf = calloc(sz, sizeof(char));
  767. char* dst = buf;
  768. char id_str_buf[20];
  769. // Print info to buffer
  770. portENTER_CRITICAL(&s_event_loops_spinlock);
  771. SLIST_FOREACH(loop_it, &s_event_loops, next) {
  772. uint32_t events_recieved, events_dropped;
  773. events_recieved = atomic_load(&loop_it->events_recieved);
  774. events_dropped = atomic_load(&loop_it->events_dropped);
  775. PRINT_DUMP_INFO(dst, sz, LOOP_DUMP_FORMAT, loop_it, loop_it->task != NULL ? loop_it->name : "none" ,
  776. events_recieved, events_dropped);
  777. int sz_bak = sz;
  778. SLIST_FOREACH(loop_node_it, &(loop_it->loop_nodes), next) {
  779. SLIST_FOREACH(handler_it, &(loop_node_it->handlers), next) {
  780. PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, "ESP_EVENT_ANY_BASE",
  781. "ESP_EVENT_ANY_ID", handler_it->invoked, handler_it->time);
  782. }
  783. SLIST_FOREACH(base_node_it, &(loop_node_it->base_nodes), next) {
  784. SLIST_FOREACH(handler_it, &(base_node_it->handlers), next) {
  785. PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, base_node_it->base ,
  786. "ESP_EVENT_ANY_ID", handler_it->invoked, handler_it->time);
  787. }
  788. SLIST_FOREACH(id_node_it, &(base_node_it->id_nodes), next) {
  789. SLIST_FOREACH(handler_it, &(id_node_it->handlers), next) {
  790. memset(id_str_buf, 0, sizeof(id_str_buf));
  791. snprintf(id_str_buf, sizeof(id_str_buf), "%d", id_node_it->id);
  792. PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, base_node_it->base ,
  793. id_str_buf, handler_it->invoked, handler_it->time);
  794. }
  795. }
  796. }
  797. }
  798. // No handlers registered for this loop
  799. if (sz == sz_bak) {
  800. PRINT_DUMP_INFO(dst, sz, " NO HANDLERS REGISTERED\n");
  801. }
  802. }
  803. portEXIT_CRITICAL(&s_event_loops_spinlock);
  804. // Print the contents of the buffer to the file
  805. fprintf(file, buf);
  806. // Free the allocated buffer
  807. free(buf);
  808. #endif
  809. return ESP_OK;
  810. }