intr_alloc.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994
  1. /*
  2. * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <stdint.h>
  7. #include <stdio.h>
  8. #include <stdlib.h>
  9. #include <stdbool.h>
  10. #include <string.h>
  11. #include <esp_types.h>
  12. #include <limits.h>
  13. #include <assert.h>
  14. #include "sdkconfig.h"
  15. #include "freertos/FreeRTOS.h"
  16. #include "freertos/task.h"
  17. #include "esp_err.h"
  18. #include "esp_log.h"
  19. #include "esp_memory_utils.h"
  20. #include "esp_intr_alloc.h"
  21. #include "esp_attr.h"
  22. #include "esp_cpu.h"
  23. #include "esp_private/rtc_ctrl.h"
  24. #include "soc/interrupts.h"
  25. #include "soc/soc_caps.h"
  26. #include "sdkconfig.h"
  27. #if !CONFIG_FREERTOS_UNICORE
  28. #include "esp_ipc.h"
  29. #endif
/* For targets that use a CLIC as their interrupt controller, CPU_INT_LINES_COUNT represents the external interrupt count */
  31. #define CPU_INT_LINES_COUNT 32
  32. static const char* TAG = "intr_alloc";
  33. #define ETS_INTERNAL_TIMER0_INTR_NO 6
  34. #define ETS_INTERNAL_TIMER1_INTR_NO 15
  35. #define ETS_INTERNAL_TIMER2_INTR_NO 16
  36. #define ETS_INTERNAL_SW0_INTR_NO 7
  37. #define ETS_INTERNAL_SW1_INTR_NO 29
  38. #define ETS_INTERNAL_PROFILING_INTR_NO 11
  39. /*
  40. Define this to debug the choices made when allocating the interrupt. This leads to much debugging
  41. output within a critical region, which can lead to weird effects like e.g. the interrupt watchdog
  42. being triggered, that is why it is separate from the normal LOG* scheme.
  43. */
  44. // #define DEBUG_INT_ALLOC_DECISIONS
  45. #ifdef DEBUG_INT_ALLOC_DECISIONS
  46. # define ALCHLOG(...) ESP_EARLY_LOGD(TAG, __VA_ARGS__)
  47. #else
  48. # define ALCHLOG(...) do {} while (0)
  49. #endif
typedef struct shared_vector_desc_t shared_vector_desc_t;
typedef struct vector_desc_t vector_desc_t;

/** One handler attached to a shared interrupt vector (singly-linked list node). */
struct shared_vector_desc_t {
    int disabled: 1;                 // 1 if this handler was disabled via esp_intr_disable()
    int source: 8;                   // interrupt source this handler serves
    volatile uint32_t *statusreg;    // optional status register; handler runs only if (*statusreg & statusmask) != 0 (NULL = always run)
    uint32_t statusmask;             // mask applied to *statusreg
    intr_handler_t isr;              // user ISR
    void *arg;                       // argument passed to isr
    shared_vector_desc_t *next;      // next handler sharing the same vector
};

// vector_desc_t flag bits (OR'ed into vector_desc_t.flags)
#define VECDESC_FL_RESERVED     (1<<0)
#define VECDESC_FL_INIRAM       (1<<1)
#define VECDESC_FL_SHARED       (1<<2)
#define VECDESC_FL_NONSHARED    (1<<3)

//Pack using bitfields for better memory use
/** Per-(cpu, interrupt line) descriptor, kept in a sorted linked list. */
struct vector_desc_t {
    int flags: 16;                          //OR of VECDESC_FL_* defines
    unsigned int cpu: 1;                    // core the line belongs to (1 bit: at most 2 cores representable)
    unsigned int intno: 5;                  // CPU interrupt line number (0..31)
    int source: 8;                          // interrupt source routed to this vector; used when not shared
    shared_vector_desc_t *shared_vec_info;  //used when VECDESC_FL_SHARED
    vector_desc_t *next;                    // next descriptor in the sorted list
};

/** Interrupt handler associated data structure */
typedef struct intr_handle_data_t {
    vector_desc_t *vector_desc;             // vector this handle refers to
    shared_vector_desc_t *shared_vector_desc; // this handle's entry in the shared list (NULL for non-shared)
} intr_handle_data_t;

typedef struct non_shared_isr_arg_t non_shared_isr_arg_t;
/** Wrapper argument used for non-shared ISRs when apptrace/SystemView is enabled. */
struct non_shared_isr_arg_t {
    intr_handler_t isr;  // user ISR
    void *isr_arg;       // user argument
    int source;          // interrupt source, for trace markers
};

//Linked list of vector descriptions, sorted by cpu.intno value
static vector_desc_t *vector_desc_head = NULL;
//This bitmask has an 1 if the int should be disabled when the flash is disabled.
static uint32_t non_iram_int_mask[SOC_CPU_CORES_NUM];
//This bitmask has 1 in it if the int was disabled using esp_intr_noniram_disable.
static uint32_t non_iram_int_disabled[SOC_CPU_CORES_NUM];
static bool non_iram_int_disabled_flag[SOC_CPU_CORES_NUM];
// Lock protecting vector_desc list, the masks above, and the hardware routing/enable state.
static portMUX_TYPE spinlock = portMUX_INITIALIZER_UNLOCKED;
  93. //Inserts an item into vector_desc list so that the list is sorted
  94. //with an incrementing cpu.intno value.
  95. static void insert_vector_desc(vector_desc_t *to_insert)
  96. {
  97. vector_desc_t *vd = vector_desc_head;
  98. vector_desc_t *prev = NULL;
  99. while(vd != NULL) {
  100. if (vd->cpu > to_insert->cpu) break;
  101. if (vd->cpu == to_insert->cpu && vd->intno >= to_insert->intno) break;
  102. prev = vd;
  103. vd = vd->next;
  104. }
  105. if ((vector_desc_head == NULL) || (prev == NULL)) {
  106. //First item
  107. to_insert->next = vd;
  108. vector_desc_head = to_insert;
  109. } else {
  110. prev->next = to_insert;
  111. to_insert->next = vd;
  112. }
  113. }
  114. //Returns a vector_desc entry for an intno/cpu, or NULL if none exists.
  115. static vector_desc_t *find_desc_for_int(int intno, int cpu)
  116. {
  117. vector_desc_t *vd = vector_desc_head;
  118. while(vd != NULL) {
  119. if (vd->cpu == cpu && vd->intno == intno) {
  120. break;
  121. }
  122. vd = vd->next;
  123. }
  124. return vd;
  125. }
  126. //Returns a vector_desc entry for an intno/cpu.
  127. //Either returns a preexisting one or allocates a new one and inserts
  128. //it into the list. Returns NULL on malloc fail.
  129. static vector_desc_t *get_desc_for_int(int intno, int cpu)
  130. {
  131. vector_desc_t *vd = find_desc_for_int(intno, cpu);
  132. if (vd == NULL) {
  133. vector_desc_t *newvd = heap_caps_malloc(sizeof(vector_desc_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
  134. if (newvd == NULL) {
  135. return NULL;
  136. }
  137. memset(newvd, 0, sizeof(vector_desc_t));
  138. newvd->intno = intno;
  139. newvd->cpu = cpu;
  140. insert_vector_desc(newvd);
  141. return newvd;
  142. } else {
  143. return vd;
  144. }
  145. }
  146. //Returns a vector_desc entry for a source, the cpu parameter is used to tell GPIO_INT and GPIO_NMI from different CPUs
  147. static vector_desc_t * find_desc_for_source(int source, int cpu)
  148. {
  149. vector_desc_t *vd = vector_desc_head;
  150. while(vd != NULL) {
  151. if (!(vd->flags & VECDESC_FL_SHARED)) {
  152. if (vd->source == source && cpu == vd->cpu) {
  153. break;
  154. }
  155. } else if (vd->cpu == cpu) {
  156. // check only shared vds for the correct cpu, otherwise skip
  157. bool found = false;
  158. shared_vector_desc_t *svd = vd->shared_vec_info;
  159. assert(svd != NULL);
  160. while(svd) {
  161. if (svd->source == source) {
  162. found = true;
  163. break;
  164. }
  165. svd = svd->next;
  166. }
  167. if (found) {
  168. break;
  169. }
  170. }
  171. vd = vd->next;
  172. }
  173. return vd;
  174. }
  175. esp_err_t esp_intr_mark_shared(int intno, int cpu, bool is_int_ram)
  176. {
  177. if (intno>31) {
  178. return ESP_ERR_INVALID_ARG;
  179. }
  180. if (cpu >= SOC_CPU_CORES_NUM) {
  181. return ESP_ERR_INVALID_ARG;
  182. }
  183. portENTER_CRITICAL(&spinlock);
  184. vector_desc_t *vd = get_desc_for_int(intno, cpu);
  185. if (vd == NULL) {
  186. portEXIT_CRITICAL(&spinlock);
  187. return ESP_ERR_NO_MEM;
  188. }
  189. vd->flags = VECDESC_FL_SHARED;
  190. if (is_int_ram) {
  191. vd->flags |= VECDESC_FL_INIRAM;
  192. }
  193. portEXIT_CRITICAL(&spinlock);
  194. return ESP_OK;
  195. }
  196. esp_err_t esp_intr_reserve(int intno, int cpu)
  197. {
  198. if (intno > 31) {
  199. return ESP_ERR_INVALID_ARG;
  200. }
  201. if (cpu >= SOC_CPU_CORES_NUM) {
  202. return ESP_ERR_INVALID_ARG;
  203. }
  204. portENTER_CRITICAL(&spinlock);
  205. vector_desc_t *vd = get_desc_for_int(intno, cpu);
  206. if (vd == NULL) {
  207. portEXIT_CRITICAL(&spinlock);
  208. return ESP_ERR_NO_MEM;
  209. }
  210. vd->flags = VECDESC_FL_RESERVED;
  211. portEXIT_CRITICAL(&spinlock);
  212. return ESP_OK;
  213. }
//Check whether the vector described by vd (interrupt line vd->intno on 'cpu')
//can satisfy an allocation with the given flags. 'force' is a forced interrupt
//line (-1 if none); when forcing, special-purpose interrupts are also accepted.
static bool is_vect_desc_usable(vector_desc_t *vd, int flags, int cpu, int force)
{
    //Check if interrupt is not reserved by design
    int x = vd->intno;
    esp_cpu_intr_desc_t intr_desc;
    esp_cpu_intr_get_desc(cpu, x, &intr_desc);
    if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD) {
        ALCHLOG("....Unusable: reserved");
        return false;
    }
    if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_SPECIAL && force == -1) {
        ALCHLOG("....Unusable: special-purpose int");
        return false;
    }
#ifndef SOC_CPU_HAS_FLEXIBLE_INTC
    //Check if the interrupt priority is acceptable.
    //(flags carries a bitmask of acceptable levels: bit n set = priority n is OK.)
    if (!(flags & (1 << intr_desc.priority))) {
        ALCHLOG("....Unusable: incompatible priority");
        return false;
    }
    //check if edge/level type matches what we want
    if (((flags & ESP_INTR_FLAG_EDGE) && (intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL)) ||
        (((!(flags & ESP_INTR_FLAG_EDGE)) && (intr_desc.type == ESP_CPU_INTR_TYPE_EDGE)))) {
        ALCHLOG("....Unusable: incompatible trigger type");
        return false;
    }
#endif
    //check if interrupt is reserved at runtime (via esp_intr_reserve)
    if (vd->flags & VECDESC_FL_RESERVED) {
        ALCHLOG("....Unusable: reserved at runtime.");
        return false;
    }
    //Ints can't be both shared and non-shared.
    assert(!((vd->flags & VECDESC_FL_SHARED) && (vd->flags & VECDESC_FL_NONSHARED)));
    //check if interrupt already is in use by a non-shared interrupt
    if (vd->flags & VECDESC_FL_NONSHARED) {
        ALCHLOG("....Unusable: already in (non-shared) use.");
        return false;
    }
    // check shared interrupt flags
    if (vd->flags & VECDESC_FL_SHARED) {
        if (flags & ESP_INTR_FLAG_SHARED) {
            bool in_iram_flag = ((flags & ESP_INTR_FLAG_IRAM) != 0);
            bool desc_in_iram_flag = ((vd->flags & VECDESC_FL_INIRAM) != 0);
            //Bail out if int is shared, but iram property doesn't match what we want.
            if ((vd->flags & VECDESC_FL_SHARED) && (desc_in_iram_flag != in_iram_flag)) {
                ALCHLOG("....Unusable: shared but iram prop doesn't match");
                return false;
            }
        } else {
            //We need an unshared IRQ; can't use shared ones; bail out if this is shared.
            ALCHLOG("...Unusable: int is shared, we need non-shared.");
            return false;
        }
    } else if (esp_cpu_intr_has_handler(x)) {
        //Check if interrupt already is allocated by esp_cpu_intr_set_handler
        //(i.e. taken outside of this allocator's bookkeeping).
        ALCHLOG("....Unusable: already allocated");
        return false;
    }
    return true;
}
//Locate a free interrupt compatible with the flags given.
//The 'force' argument can be -1, or 0-31 to force checking a certain interrupt.
//When a CPU is forced, the ESP_CPU_INTR_DESC_FLAG_SPECIAL marked interrupts are also accepted.
//Returns the chosen interrupt line number, or -1 when none qualifies.
static int get_available_int(int flags, int cpu, int force, int source)
{
    int x;
    int best = -1;            //best candidate line so far (-1 = none)
    int bestPriority = 9;     //priority of the best candidate (lower wins; 9 = sentinel above any real priority)
    int bestSharedCt = INT_MAX; //number of ISRs already attached to the best shared candidate (fewer wins)
    //Default vector desc, for vectors not in the linked list
    vector_desc_t empty_vect_desc;
    memset(&empty_vect_desc, 0, sizeof(vector_desc_t));
    //Level defaults to any low/med interrupt
    if (!(flags & ESP_INTR_FLAG_LEVELMASK)) {
        flags |= ESP_INTR_FLAG_LOWMED;
    }
    ALCHLOG("get_available_int: try to find existing. Cpu: %d, Source: %d", cpu, source);
    //Fast path: if this source already has a vector, reuse it (or fail).
    vector_desc_t *vd = find_desc_for_source(source, cpu);
    if (vd) {
        // if existing vd found, don't need to search any more.
        ALCHLOG("get_available_int: existing vd found. intno: %d", vd->intno);
        if ( force != -1 && force != vd->intno ) {
            ALCHLOG("get_available_int: intr forced but does not match existing. existing intno: %d, force: %d", vd->intno, force);
        } else if (!is_vect_desc_usable(vd, flags, cpu, force)) {
            ALCHLOG("get_available_int: existing vd invalid.");
        } else {
            best = vd->intno;
        }
        return best;
    }
    //Forced line: check only that one line and return.
    if (force != -1) {
        ALCHLOG("get_available_int: try to find force. Cpu: %d, Source: %d, Force: %d", cpu, source, force);
        //if force assigned, don't need to search any more.
        vd = find_desc_for_int(force, cpu);
        if (vd == NULL) {
            //if existing vd not found, just check the default state for the intr.
            empty_vect_desc.intno = force;
            vd = &empty_vect_desc;
        }
        if (is_vect_desc_usable(vd, flags, cpu, force)) {
            best = vd->intno;
        } else {
            ALCHLOG("get_avalaible_int: forced vd invalid.");
        }
        return best;
    }
    ALCHLOG("get_free_int: start looking. Current cpu: %d", cpu);
    /* No allocated handlers as well as forced intr, iterate over the 32 possible interrupts */
    for (x = 0; x < CPU_INT_LINES_COUNT; x++) {
        //Grab the vector_desc for this vector.
        vd = find_desc_for_int(x, cpu);
        if (vd == NULL) {
            //No descriptor yet: evaluate against a blank one.
            empty_vect_desc.intno = x;
            vd = &empty_vect_desc;
        }
        esp_cpu_intr_desc_t intr_desc;
        esp_cpu_intr_get_desc(cpu, x, &intr_desc);
        ALCHLOG("Int %d reserved %d priority %d %s hasIsr %d",
                x, intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD, intr_desc.priority,
                intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL? "LEVEL" : "EDGE", esp_cpu_intr_has_handler(x));
        if (!is_vect_desc_usable(vd, flags, cpu, force)) {
            continue;
        }
        if (flags & ESP_INTR_FLAG_SHARED) {
            //We're allocating a shared int.
            //See if int already is used as a shared interrupt.
            if (vd->flags & VECDESC_FL_SHARED) {
                //We can use this already-marked-as-shared interrupt. Count the already attached isrs in order to see
                //how useful it is.
                int no = 0;
                shared_vector_desc_t *svdesc = vd->shared_vec_info;
                while (svdesc != NULL) {
                    no++;
                    svdesc = svdesc->next;
                }
                //Prefer the shared vector with the fewest attached ISRs / lowest priority.
                if (no<bestSharedCt || bestPriority > intr_desc.priority) {
                    //Seems like this shared vector is both okay and has the least amount of ISRs already attached to it.
                    best = x;
                    bestSharedCt = no;
                    bestPriority = intr_desc.priority;
                    ALCHLOG("...int %d more usable as a shared int: has %d existing vectors", x, no);
                } else {
                    ALCHLOG("...worse than int %d", best);
                }
            } else {
                if (best == -1) {
                    //We haven't found a feasible shared interrupt yet. This one is still free and usable, even if
                    //not marked as shared.
                    //Remember it in case we don't find any other shared interrupt that qualifies.
                    if (bestPriority > intr_desc.priority) {
                        best = x;
                        bestPriority = intr_desc.priority;
                        ALCHLOG("...int %d usable as a new shared int", x);
                    }
                } else {
                    ALCHLOG("...already have a shared int");
                }
            }
        } else {
            //Seems this interrupt is feasible. Select it and break out of the loop; no need to search further.
            if (bestPriority > intr_desc.priority) {
                best = x;
                bestPriority = intr_desc.priority;
            } else {
                ALCHLOG("...worse than int %d", best);
            }
        }
    }
    ALCHLOG("get_available_int: using int %d", best);
    //Okay, by now we have looked at all potential interrupts and hopefully have selected the best one in best.
    return best;
}
//Common shared isr handler. Chain-call all ISRs attached to this vector.
//Runs with the allocator spinlock held so the shared list cannot change under us.
static void IRAM_ATTR shared_intr_isr(void *arg)
{
    vector_desc_t *vd = (vector_desc_t*)arg;
    shared_vector_desc_t *sh_vec = vd->shared_vec_info;
    portENTER_CRITICAL_ISR(&spinlock);
    while(sh_vec) {
        if (!sh_vec->disabled) {
            //Call the handler if no status register was given, or if the
            //status register shows this source is pending.
            if ((sh_vec->statusreg == NULL) || (*sh_vec->statusreg & sh_vec->statusmask)) {
                traceISR_ENTER(sh_vec->source + ETS_INTERNAL_INTR_SOURCE_OFF);
                sh_vec->isr(sh_vec->arg);
                // check if we will return to scheduler or to interrupted task after ISR
                if (!os_task_switch_is_pended(esp_cpu_get_core_id())) {
                    traceISR_EXIT();
                }
            }
        }
        sh_vec = sh_vec->next;
    }
    portEXIT_CRITICAL_ISR(&spinlock);
}
#if CONFIG_APPTRACE_SV_ENABLE
//Common non-shared isr handler wrapper. Only compiled when SystemView apptrace
//is enabled: wraps the user ISR with trace enter/exit markers.
static void IRAM_ATTR non_shared_intr_isr(void *arg)
{
    non_shared_isr_arg_t *ns_isr_arg = (non_shared_isr_arg_t*)arg;
    portENTER_CRITICAL_ISR(&spinlock);
    traceISR_ENTER(ns_isr_arg->source + ETS_INTERNAL_INTR_SOURCE_OFF);
    // FIXME: can we call ISR and check os_task_switch_is_pended() after releasing spinlock?
    // when CONFIG_APPTRACE_SV_ENABLE = 0 ISRs for non-shared IRQs are called without spinlock
    ns_isr_arg->isr(ns_isr_arg->isr_arg);
    // check if we will return to scheduler or to interrupted task after ISR
    if (!os_task_switch_is_pended(esp_cpu_get_core_id())) {
        traceISR_EXIT();
    }
    portEXIT_CRITICAL_ISR(&spinlock);
}
#endif
//We use ESP_EARLY_LOG* here because this can be called before the scheduler is running.
/**
 * Allocate an interrupt line on the current core for the given source.
 *
 * @param source         Interrupt source (ETS_*_INTR_SOURCE), or negative for a
 *                       CPU-internal (non-muxed) interrupt.
 * @param flags          ESP_INTR_FLAG_* options (level mask, SHARED, IRAM, EDGE, ...).
 * @param intrstatusreg  Address of a status register (0 if unused); consulted by the
 *                       shared-ISR dispatcher to see whether this source is pending.
 * @param intrstatusmask Mask applied to *intrstatusreg; must be non-zero when
 *                       intrstatusreg is set.
 * @param handler        ISR to call; must be NULL for ESP_INTR_FLAG_HIGH, must be
 *                       non-NULL for shared interrupts.
 * @param arg            Argument passed to the handler.
 * @param ret_handle     Out: handle for later enable/disable/free; may be NULL.
 * @return ESP_OK, or ESP_ERR_INVALID_ARG / ESP_ERR_NO_MEM / ESP_ERR_NOT_FOUND.
 */
esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusreg, uint32_t intrstatusmask, intr_handler_t handler,
                                    void *arg, intr_handle_t *ret_handle)
{
    intr_handle_data_t *ret=NULL;
    int force = -1;
    ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %u): checking args", esp_cpu_get_core_id());
    //Shared interrupts should be level-triggered.
    if ((flags & ESP_INTR_FLAG_SHARED) && (flags & ESP_INTR_FLAG_EDGE)) {
        return ESP_ERR_INVALID_ARG;
    }
    //You can't set an handler / arg for a non-C-callable interrupt.
    if ((flags & ESP_INTR_FLAG_HIGH) && (handler)) {
        return ESP_ERR_INVALID_ARG;
    }
    //Shared ints should have handler and non-processor-local source
    if ((flags & ESP_INTR_FLAG_SHARED) && (!handler || source<0)) {
        return ESP_ERR_INVALID_ARG;
    }
    //Statusreg should have a mask
    if (intrstatusreg && !intrstatusmask) {
        return ESP_ERR_INVALID_ARG;
    }
    //If the ISR is marked to be IRAM-resident, the handler must not be in the cached region
    //ToDo: if we are to allow placing interrupt handlers into the 0x400c0000—0x400c2000 region,
    //we need to make sure the interrupt is connected to the CPU0.
    //CPU1 does not have access to the RTC fast memory through this region.
    if ((flags & ESP_INTR_FLAG_IRAM) && handler && !esp_ptr_in_iram(handler) && !esp_ptr_in_rtc_iram_fast(handler)) {
        return ESP_ERR_INVALID_ARG;
    }
    //Default to prio 1 for shared interrupts. Default to prio 1, 2 or 3 for non-shared interrupts.
    if ((flags & ESP_INTR_FLAG_LEVELMASK) == 0) {
        if (flags & ESP_INTR_FLAG_SHARED) {
            flags |= ESP_INTR_FLAG_LEVEL1;
        } else {
            flags |= ESP_INTR_FLAG_LOWMED;
        }
    }
    ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %u): Args okay. Resulting flags 0x%X", esp_cpu_get_core_id(), flags);
    //Check 'special' interrupt sources. These are tied to one specific interrupt, so we
    //have to force get_free_int to only look at that.
    if (source == ETS_INTERNAL_TIMER0_INTR_SOURCE) {
        force = ETS_INTERNAL_TIMER0_INTR_NO;
    }
    if (source == ETS_INTERNAL_TIMER1_INTR_SOURCE) {
        force = ETS_INTERNAL_TIMER1_INTR_NO;
    }
    if (source == ETS_INTERNAL_TIMER2_INTR_SOURCE) {
        force = ETS_INTERNAL_TIMER2_INTR_NO;
    }
    if (source == ETS_INTERNAL_SW0_INTR_SOURCE) {
        force = ETS_INTERNAL_SW0_INTR_NO;
    }
    if (source == ETS_INTERNAL_SW1_INTR_SOURCE) {
        force = ETS_INTERNAL_SW1_INTR_NO;
    }
    if (source == ETS_INTERNAL_PROFILING_INTR_SOURCE) {
        force = ETS_INTERNAL_PROFILING_INTR_NO;
    }
    //Allocate a return handle. If we end up not needing it, we'll free it later on.
    //(Allocated outside the critical section to keep the section short.)
    ret = heap_caps_malloc(sizeof(intr_handle_data_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    if (ret == NULL) {
        return ESP_ERR_NO_MEM;
    }
    portENTER_CRITICAL(&spinlock);
    uint32_t cpu = esp_cpu_get_core_id();
    //See if we can find an interrupt that matches the flags.
    int intr = get_available_int(flags, cpu, force, source);
    if (intr == -1) {
        //None found. Bail out.
        portEXIT_CRITICAL(&spinlock);
        free(ret);
        ESP_LOGE(TAG, "No free interrupt inputs for %s interrupt (flags 0x%X)", esp_isr_names[source], flags);
        return ESP_ERR_NOT_FOUND;
    }
    //Get an int vector desc for int.
    vector_desc_t *vd = get_desc_for_int(intr, cpu);
    if (vd == NULL) {
        portEXIT_CRITICAL(&spinlock);
        free(ret);
        return ESP_ERR_NO_MEM;
    }
    //Allocate that int!
    if (flags & ESP_INTR_FLAG_SHARED) {
        //Populate vector entry and add to linked list.
        shared_vector_desc_t *sh_vec = heap_caps_malloc(sizeof(shared_vector_desc_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        if (sh_vec == NULL) {
            portEXIT_CRITICAL(&spinlock);
            free(ret);
            return ESP_ERR_NO_MEM;
        }
        memset(sh_vec, 0, sizeof(shared_vector_desc_t));
        sh_vec->statusreg = (uint32_t*)intrstatusreg;
        sh_vec->statusmask = intrstatusmask;
        sh_vec->isr = handler;
        sh_vec->arg = arg;
        //Prepend the new handler to the vector's shared list.
        sh_vec->next = vd->shared_vec_info;
        sh_vec->source = source;
        sh_vec->disabled = 0;
        vd->shared_vec_info = sh_vec;
        vd->flags |= VECDESC_FL_SHARED;
        //(Re-)set shared isr handler to new value.
        esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)shared_intr_isr, vd);
    } else {
        //Mark as unusable for other interrupt sources. This is ours now!
        vd->flags = VECDESC_FL_NONSHARED;
        if (handler) {
#if CONFIG_APPTRACE_SV_ENABLE
            //Wrap the user ISR so trace markers can be emitted around it.
            non_shared_isr_arg_t *ns_isr_arg = heap_caps_malloc(sizeof(non_shared_isr_arg_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
            if (!ns_isr_arg) {
                portEXIT_CRITICAL(&spinlock);
                free(ret);
                return ESP_ERR_NO_MEM;
            }
            ns_isr_arg->isr = handler;
            ns_isr_arg->isr_arg = arg;
            ns_isr_arg->source = source;
            esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)non_shared_intr_isr, ns_isr_arg);
#else
            esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)handler, arg);
#endif
        }
        if (flags & ESP_INTR_FLAG_EDGE) {
            //Make sure no stale edge is still latched.
            esp_cpu_intr_edge_ack(intr);
        }
        vd->source = source;
    }
    //Track whether this line must be disabled while flash cache is off.
    if (flags & ESP_INTR_FLAG_IRAM) {
        vd->flags |= VECDESC_FL_INIRAM;
        non_iram_int_mask[cpu] &= ~(1<<intr);
    } else {
        vd->flags &= ~VECDESC_FL_INIRAM;
        non_iram_int_mask[cpu] |= (1<<intr);
    }
    //Route the source to the chosen line through the interrupt matrix (muxed sources only).
    if (source>=0) {
        esp_rom_route_intr_matrix(cpu, source, intr);
    }
    //Fill return handle data.
    ret->vector_desc = vd;
    ret->shared_vector_desc = vd->shared_vec_info;
    //Enable int at CPU-level;
    ESP_INTR_ENABLE(intr);
    //If interrupt has to be started disabled, do that now; ints won't be enabled for real until the end
    //of the critical section.
    if (flags & ESP_INTR_FLAG_INTRDISABLED) {
        esp_intr_disable(ret);
    }
#ifdef SOC_CPU_HAS_FLEXIBLE_INTC
    //Extract the level from the interrupt passed flags
    int level = esp_intr_flags_to_level(flags);
    esp_cpu_intr_set_priority(intr, level);
    if (flags & ESP_INTR_FLAG_EDGE) {
        esp_cpu_intr_set_type(intr, ESP_CPU_INTR_TYPE_EDGE);
    } else {
        esp_cpu_intr_set_type(intr, ESP_CPU_INTR_TYPE_LEVEL);
    }
#endif
    portEXIT_CRITICAL(&spinlock);
    //Fill return handle if needed, otherwise free handle.
    if (ret_handle != NULL) {
        *ret_handle = ret;
    } else {
        free(ret);
    }
    ESP_EARLY_LOGD(TAG, "Connected src %d to int %d (cpu %"PRIu32")", source, intr, cpu);
    return ESP_OK;
}
  592. esp_err_t esp_intr_alloc(int source, int flags, intr_handler_t handler, void *arg, intr_handle_t *ret_handle)
  593. {
  594. /*
  595. As an optimization, we can create a table with the possible interrupt status registers and masks for every single
  596. source there is. We can then add code here to look up an applicable value and pass that to the
  597. esp_intr_alloc_intrstatus function.
  598. */
  599. return esp_intr_alloc_intrstatus(source, flags, 0, 0, handler, arg, ret_handle);
  600. }
  601. esp_err_t IRAM_ATTR esp_intr_set_in_iram(intr_handle_t handle, bool is_in_iram)
  602. {
  603. if (!handle) {
  604. return ESP_ERR_INVALID_ARG;
  605. }
  606. vector_desc_t *vd = handle->vector_desc;
  607. if (vd->flags & VECDESC_FL_SHARED) {
  608. return ESP_ERR_INVALID_ARG;
  609. }
  610. portENTER_CRITICAL(&spinlock);
  611. uint32_t mask = (1 << vd->intno);
  612. if (is_in_iram) {
  613. vd->flags |= VECDESC_FL_INIRAM;
  614. non_iram_int_mask[vd->cpu] &= ~mask;
  615. } else {
  616. vd->flags &= ~VECDESC_FL_INIRAM;
  617. non_iram_int_mask[vd->cpu] |= mask;
  618. }
  619. portEXIT_CRITICAL(&spinlock);
  620. return ESP_OK;
  621. }
#if !CONFIG_FREERTOS_UNICORE
//IPC trampoline: runs esp_intr_free on the core the interrupt was allocated on.
//The return value is discarded because the IPC mechanism cannot propagate it.
static void esp_intr_free_cb(void *arg)
{
    (void)esp_intr_free((intr_handle_t)arg);
}
#endif /* !CONFIG_FREERTOS_UNICORE */
//Free an allocated interrupt handle: detaches the handler (or this handler's
//entry in a shared vector), disables the line when nothing is attached anymore,
//and releases the handle itself. Must run on the core owning the interrupt;
//cross-core calls are forwarded via IPC.
esp_err_t esp_intr_free(intr_handle_t handle)
{
    bool free_shared_vector=false;
    if (!handle) {
        return ESP_ERR_INVALID_ARG;
    }
#if !CONFIG_FREERTOS_UNICORE
    //Assign this routine to the core where this interrupt is allocated on.
    if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
        esp_err_t ret = esp_ipc_call_blocking(handle->vector_desc->cpu, &esp_intr_free_cb, (void *)handle);
        return ret == ESP_OK ? ESP_OK : ESP_FAIL;
    }
#endif /* !CONFIG_FREERTOS_UNICORE */
    portENTER_CRITICAL(&spinlock);
    esp_intr_disable(handle);
    if (handle->vector_desc->flags & VECDESC_FL_SHARED) {
        //Find and kill the shared int
        shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
        shared_vector_desc_t *prevsvd = NULL;
        assert(svd); //should be something in there for a shared int
        while (svd != NULL) {
            if (svd == handle->shared_vector_desc) {
                //Found it. Now kill it.
                if (prevsvd) {
                    prevsvd->next = svd->next;
                } else {
                    handle->vector_desc->shared_vec_info = svd->next;
                }
                free(svd);
                break;
            }
            prevsvd = svd;
            svd = svd->next;
        }
        //If nothing left, disable interrupt.
        if (handle->vector_desc->shared_vec_info == NULL) {
            free_shared_vector = true;
        }
        //NOTE(review): after the loop, svd is non-NULL exactly when the entry WAS
        //found and freed (it is only tested as a boolean below, never dereferenced,
        //so the dangling pointer is harmless). The two message strings therefore
        //appear swapped relative to that condition — confirm intended wording.
        ESP_EARLY_LOGV(TAG,
                       "esp_intr_free: Deleting shared int: %s. Shared int is %s",
                       svd ? "not found or last one" : "deleted",
                       free_shared_vector ? "empty now." : "still in use");
    }
    if ((handle->vector_desc->flags & VECDESC_FL_NONSHARED) || free_shared_vector) {
        ESP_EARLY_LOGV(TAG, "esp_intr_free: Disabling int, killing handler");
#if CONFIG_APPTRACE_SV_ENABLE
        if (!free_shared_vector) {
            //Non-shared path allocated a non_shared_isr_arg_t wrapper; release it.
            void *isr_arg = esp_cpu_intr_get_handler_arg(handle->vector_desc->intno);
            if (isr_arg) {
                free(isr_arg);
            }
        }
#endif
        //Reset to normal handler:
        esp_cpu_intr_set_handler(handle->vector_desc->intno, NULL, (void*)((int)handle->vector_desc->intno));
        //Theoretically, we could free the vector_desc... not sure if that's worth the few bytes of memory
        //we save.(We can also not use the same exit path for empty shared ints anymore if we delete
        //the desc.) For now, just mark it as free.
        handle->vector_desc->flags &= ~(VECDESC_FL_NONSHARED|VECDESC_FL_RESERVED|VECDESC_FL_SHARED);
        handle->vector_desc->source = ETS_INTERNAL_UNUSED_INTR_SOURCE;
        //Also kill non_iram mask bit.
        non_iram_int_mask[handle->vector_desc->cpu] &= ~(1<<(handle->vector_desc->intno));
    }
    portEXIT_CRITICAL(&spinlock);
    free(handle);
    return ESP_OK;
}
  695. int esp_intr_get_intno(intr_handle_t handle)
  696. {
  697. return handle->vector_desc->intno;
  698. }
  699. int esp_intr_get_cpu(intr_handle_t handle)
  700. {
  701. return handle->vector_desc->cpu;
  702. }
  703. /*
  704. Interrupt disabling strategy:
  705. If the source is >=0 (meaning a muxed interrupt), we disable it by muxing the interrupt to a non-connected
  706. interrupt. If the source is <0 (meaning an internal, per-cpu interrupt), we disable it using ESP_INTR_DISABLE.
  707. This allows us to, for the muxed CPUs, disable an int from the other core. It also allows disabling shared
  708. interrupts.
  709. */
//Muxing an interrupt source to interrupt 6, 7, 11, 15, 16 or 29 causes the interrupt to effectively be disabled.
  711. #define INT_MUX_DISABLED_INTNO 6
//Re-enable a previously disabled interrupt handle.
//Muxed sources (source >= 0) are re-enabled by re-routing the source to its
//interrupt line through the interrupt matrix, which works from any core.
//CPU-internal sources are re-enabled via the CPU's interrupt-enable register
//and can only be enabled from the core they live on.
esp_err_t IRAM_ATTR esp_intr_enable(intr_handle_t handle)
{
    if (!handle) {
        return ESP_ERR_INVALID_ARG;
    }
    portENTER_CRITICAL_SAFE(&spinlock);
    int source;
    if (handle->shared_vector_desc) {
        //Shared handle: clear this handler's per-handler disabled flag too.
        handle->shared_vector_desc->disabled = 0;
        source=handle->shared_vector_desc->source;
    } else {
        source=handle->vector_desc->source;
    }
    if (source >= 0) {
        //Disabled using int matrix; re-connect to enable
        esp_rom_route_intr_matrix(handle->vector_desc->cpu, source, handle->vector_desc->intno);
    } else {
        //Re-enable using cpu int ena reg
        if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
            portEXIT_CRITICAL_SAFE(&spinlock);
            return ESP_ERR_INVALID_ARG; //Can only enable these ints on this cpu
        }
        ESP_INTR_ENABLE(handle->vector_desc->intno);
    }
    portEXIT_CRITICAL_SAFE(&spinlock);
    return ESP_OK;
}
/**
 * Disable the interrupt referenced by the given handle.
 *
 * For a shared interrupt, only this handle's handler is marked disabled; the
 * source is disconnected from the interrupt matrix only once every shared
 * handler attached to the same source is disabled. Muxed sources (source >= 0)
 * are disabled by routing them to INT_MUX_DISABLED_INTNO (see comment above),
 * which works from either core. Internal per-CPU interrupts (source < 0) are
 * disabled through the CPU's interrupt-enable register and can only be
 * disabled from the core they are allocated on.
 *
 * @param handle Interrupt handle
 * @return ESP_OK on success; ESP_ERR_INVALID_ARG if handle is NULL or the
 *         interrupt is a per-CPU interrupt of another core.
 */
esp_err_t IRAM_ATTR esp_intr_disable(intr_handle_t handle)
{
    if (handle == NULL) {
        return ESP_ERR_INVALID_ARG;
    }
    portENTER_CRITICAL_SAFE(&spinlock);
    int source;
    bool disabled = true;
    if (handle->shared_vector_desc) {
        handle->shared_vector_desc->disabled = 1;
        source=handle->shared_vector_desc->source;
        //Walk all shared handlers on this vector; the source may only be
        //disconnected if no other enabled handler still uses it.
        shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
        assert(svd != NULL);
        while(svd) {
            if (svd->source == source && !svd->disabled) {
                disabled = false;
                break;
            }
            svd = svd->next;
        }
    } else {
        source=handle->vector_desc->source;
    }
    if (source >= 0) {
        if (disabled) {
            //Disable using int matrix
            esp_rom_route_intr_matrix(handle->vector_desc->cpu, source, INT_MUX_DISABLED_INTNO);
        }
    } else {
        //Disable using per-cpu regs
        if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
            portEXIT_CRITICAL_SAFE(&spinlock);
            return ESP_ERR_INVALID_ARG; //Can only disable these ints on this cpu
        }
        ESP_INTR_DISABLE(handle->vector_desc->intno);
    }
    portEXIT_CRITICAL_SAFE(&spinlock);
    return ESP_OK;
}
/**
 * Disable all interrupts on the current CPU that are not marked IRAM-safe
 * (i.e. not in non_iram_int_mask), remembering which of them were enabled so
 * esp_intr_noniram_enable() can restore exactly that set.
 *
 * Calls must be strictly paired with esp_intr_noniram_enable(); a second
 * disable without an intervening enable aborts.
 */
void IRAM_ATTR esp_intr_noniram_disable(void)
{
    portENTER_CRITICAL_SAFE(&spinlock);
    uint32_t oldint;
    uint32_t cpu = esp_cpu_get_core_id();
    uint32_t non_iram_ints = non_iram_int_mask[cpu];
    if (non_iram_int_disabled_flag[cpu]) {
        //Unbalanced call: already disabled on this CPU.
        abort();
    }
    non_iram_int_disabled_flag[cpu] = true;
    //Snapshot the enabled mask BEFORE disabling, so we can restore it later.
    oldint = esp_cpu_intr_get_enabled_mask();
    esp_cpu_intr_disable(non_iram_ints);
    //Also disable the RTC interrupts that don't want to be put in IRAM.
    rtc_isr_noniram_disable(cpu);
    //Save which of the non-IRAM interrupts were actually enabled.
    non_iram_int_disabled[cpu] = oldint & non_iram_ints;
    portEXIT_CRITICAL_SAFE(&spinlock);
}
  796. void IRAM_ATTR esp_intr_noniram_enable(void)
  797. {
  798. portENTER_CRITICAL_SAFE(&spinlock);
  799. uint32_t cpu = esp_cpu_get_core_id();
  800. int non_iram_ints = non_iram_int_disabled[cpu];
  801. if (!non_iram_int_disabled_flag[cpu]) {
  802. abort();
  803. }
  804. non_iram_int_disabled_flag[cpu] = false;
  805. esp_cpu_intr_enable(non_iram_ints);
  806. rtc_isr_noniram_enable(cpu);
  807. portEXIT_CRITICAL_SAFE(&spinlock);
  808. }
  809. //These functions are provided in ROM, but the ROM-based functions use non-multicore-capable
  810. //virtualized interrupt levels. Thus, we disable them in the ld file and provide working
  811. //equivalents here.
//Enable (unmask) the CPU interrupt lines set in the given bitmask on the
//current core. Multicore-capable replacement for the ROM version (see above).
void IRAM_ATTR ets_isr_unmask(uint32_t mask) {
    esp_cpu_intr_enable(mask);
}
//Disable (mask) the CPU interrupt lines set in the given bitmask on the
//current core. Multicore-capable replacement for the ROM version (see above).
void IRAM_ATTR ets_isr_mask(uint32_t mask) {
    esp_cpu_intr_disable(mask);
}
  818. void IRAM_ATTR esp_intr_enable_source(int inum)
  819. {
  820. esp_cpu_intr_enable(1 << inum);
  821. }
  822. void IRAM_ATTR esp_intr_disable_source(int inum)
  823. {
  824. esp_cpu_intr_disable(1 << inum);
  825. }
/**
 * Dump the status of every CPU interrupt line to the given stream.
 *
 * For each core (only core 0 in single-core mode) and each interrupt line,
 * prints the priority, type (edge/level) and allocation status (reserved,
 * CPU-internal, free, used, shared...), then totals of free general-use
 * lines and shared lines.
 *
 * @param stream Output stream; NULL means stdout.
 * @return ESP_OK
 */
esp_err_t esp_intr_dump(FILE *stream)
{
    if (stream == NULL) {
        stream = stdout;
    }
#ifdef CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
    const int cpu_num = 1;
#else
    const int cpu_num = SOC_CPU_CORES_NUM;
#endif
    int general_use_ints_free = 0;
    int shared_ints = 0;
    for (int cpu = 0; cpu < cpu_num; ++cpu) {
        fprintf(stream, "CPU %d interrupt status:\n", cpu);
        fprintf(stream, " Int Level Type Status\n");
        for (int i_num = 0; i_num < CPU_INT_LINES_COUNT; ++i_num) {
            fprintf(stream, " %2d ", i_num);
            esp_cpu_intr_desc_t intr_desc;
            esp_cpu_intr_get_desc(cpu, i_num, &intr_desc);
            bool is_general_use = true;
            vector_desc_t *vd = find_desc_for_int(i_num, cpu);
#ifndef SOC_CPU_HAS_FLEXIBLE_INTC
            //Fixed-priority targets: priority/type come from the static table.
            fprintf(stream, " %d %s ",
            intr_desc.priority,
            intr_desc.type == ESP_CPU_INTR_TYPE_EDGE ? "Edge " : "Level");
            //Only level interrupts at or below XCHAL_EXCM_LEVEL count as
            //general-use here.
            is_general_use = (intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL) && (intr_desc.priority <= XCHAL_EXCM_LEVEL);
#else // SOC_CPU_HAS_FLEXIBLE_INTC
            if (vd == NULL) {
                fprintf(stream, " * * ");
            } else {
                // esp_cpu_intr_get_* functions need to be extended with cpu parameter.
                // Showing info for the current cpu only, in the meantime.
                if (esp_cpu_get_core_id() == cpu) {
                    fprintf(stream, " %d %s ",
                    esp_cpu_intr_get_priority(i_num),
                    esp_cpu_intr_get_type(i_num) == ESP_CPU_INTR_TYPE_EDGE ? "Edge " : "Level");
                } else {
                    fprintf(stream, " ? ? ");
                }
            }
#endif // SOC_CPU_HAS_FLEXIBLE_INTC
            if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD) {
                fprintf(stream, "Reserved");
            } else if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_SPECIAL) {
                fprintf(stream, "CPU-internal");
            } else {
                //No vector descriptor, or no allocation flags set: the line is free.
                if (vd == NULL || (vd->flags & (VECDESC_FL_RESERVED | VECDESC_FL_NONSHARED | VECDESC_FL_SHARED)) == 0) {
                    fprintf(stream, "Free");
                    if (is_general_use) {
                        ++general_use_ints_free;
                    } else {
                        fprintf(stream, " (not general-use)");
                    }
                } else if (vd->flags & VECDESC_FL_RESERVED) {
                    fprintf(stream, "Reserved (run-time)");
                } else if (vd->flags & VECDESC_FL_NONSHARED) {
                    fprintf(stream, "Used: %s", esp_isr_names[vd->source]);
                } else if (vd->flags & VECDESC_FL_SHARED) {
                    //List every source attached to this shared line.
                    fprintf(stream, "Shared: ");
                    for (shared_vector_desc_t *svd = vd->shared_vec_info; svd != NULL; svd = svd->next) {
                        fprintf(stream, "%s ", esp_isr_names[svd->source]);
                    }
                    ++shared_ints;
                } else {
                    fprintf(stream, "Unknown, flags = 0x%x", vd->flags);
                }
            }
            fprintf(stream, "\n");
        }
    }
    fprintf(stream, "Interrupts available for general use: %d\n", general_use_ints_free);
    fprintf(stream, "Shared interrupts: %d\n", shared_ints);
    return ESP_OK;
}