dedic_gpio.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397
  1. /*
  2. * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. // #define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
  7. #include <stdlib.h>
  8. #include <string.h>
  9. #include <sys/lock.h>
  10. #include "sdkconfig.h"
  11. #include "esp_compiler.h"
  12. #include "esp_heap_caps.h"
  13. #include "esp_intr_alloc.h"
  14. #include "esp_log.h"
  15. #include "esp_check.h"
  16. #include "soc/soc_caps.h"
  17. #include "soc/gpio_periph.h"
  18. #include "soc/io_mux_reg.h"
  19. #include "hal/cpu_hal.h"
  20. #include "hal/cpu_ll.h"
  21. #include "hal/gpio_hal.h"
  22. #include "driver/periph_ctrl.h"
  23. #include "esp_rom_gpio.h"
  24. #include "freertos/FreeRTOS.h"
  25. #include "driver/dedic_gpio.h"
  26. #include "soc/dedic_gpio_periph.h"
  27. #if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
  28. #include "soc/dedic_gpio_struct.h"
  29. #include "hal/dedic_gpio_ll.h"
  30. #endif
static const char *TAG = "dedic_gpio";

typedef struct dedic_gpio_platform_t dedic_gpio_platform_t;
typedef struct dedic_gpio_bundle_t dedic_gpio_bundle_t;

// Dedicated GPIO driver platform, GPIO bundles will be installed onto it.
// One instance per CPU core, created lazily on first bundle installation.
static dedic_gpio_platform_t *s_platform[SOC_CPU_CORES_NUM];
// platform level mutex lock, guards creation/destruction of s_platform[core]
static _lock_t s_platform_mutexlock[SOC_CPU_CORES_NUM];

// Per-core driver state shared by all bundles installed on that core.
struct dedic_gpio_platform_t {
    portMUX_TYPE spinlock;      // Spinlock, stop GPIO channels from accessing common resource concurrently
    uint32_t out_occupied_mask; // mask of output channels that already occupied
    uint32_t in_occupied_mask;  // mask of input channels that already occupied
#if SOC_DEDIC_GPIO_HAS_INTERRUPT
    intr_handle_t intr_hdl;     // interrupt handle (lazily allocated, see dedic_gpio_install_interrupt)
    dedic_gpio_isr_callback_t cbs[SOC_DEDIC_GPIO_IN_CHANNELS_NUM]; // array of callback function for input channel
    void *cb_args[SOC_DEDIC_GPIO_IN_CHANNELS_NUM];                 // array of callback arguments for input channel
    dedic_gpio_bundle_t *in_bundles[SOC_DEDIC_GPIO_IN_CHANNELS_NUM]; // which bundle the input channel belongs to
#endif
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
    dedic_dev_t *dev;           // peripheral register base, only on targets exposing register access
#endif
};

// One allocated GPIO bundle; channel masks/offsets are relative to the per-core bank.
struct dedic_gpio_bundle_t {
    uint32_t core_id;    // CPU core ID, a GPIO bundle must be installed to a specific CPU core
    uint32_t out_mask;   // mask of output channels in the bank
    uint32_t in_mask;    // mask of input channels in the bank
    uint32_t out_offset; // offset in the bank (seen from output channel)
    uint32_t in_offset;  // offset in the bank (seen from input channel)
    size_t nr_gpio;      // number of GPIOs in the gpio_array
    int gpio_array[];    // flexible array member: GPIO numbers configured by the user
};
  61. static esp_err_t dedic_gpio_build_platform(uint32_t core_id)
  62. {
  63. esp_err_t ret = ESP_OK;
  64. if (!s_platform[core_id]) {
  65. // prevent building platform concurrently
  66. _lock_acquire(&s_platform_mutexlock[core_id]);
  67. if (!s_platform[core_id]) {
  68. s_platform[core_id] = calloc(1, sizeof(dedic_gpio_platform_t));
  69. if (s_platform[core_id]) {
  70. // initialize platfrom members
  71. s_platform[core_id]->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
  72. #if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
  73. s_platform[core_id]->dev = &DEDIC_GPIO;
  74. #endif // SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
  75. #if !SOC_DEDIC_PERIPH_AUTO_ENABLE
  76. periph_module_enable(dedic_gpio_periph_signals.module); // enable APB clock to peripheral
  77. #endif // !SOC_DEDIC_PERIPH_AUTO_ENABLE
  78. }
  79. }
  80. _lock_release(&s_platform_mutexlock[core_id]);
  81. ESP_GOTO_ON_FALSE(s_platform[core_id], ESP_ERR_NO_MEM, err, TAG, "no mem for s_platform[%d]", core_id);
  82. ESP_LOGD(TAG, "build platform on core[%d] at %p", core_id, s_platform);
  83. }
  84. err:
  85. return ret;
  86. }
/**
 * @brief Destroy the platform instance of the given CPU core.
 *
 * Mirror of dedic_gpio_build_platform(): frees the instance under the same
 * per-core mutex so a concurrent teardown runs only once.
 *
 * @param core_id CPU core whose platform is torn down
 */
static void dedic_gpio_break_platform(uint32_t core_id)
{
    if (s_platform[core_id]) {
        // prevent breaking platform concurrently
        _lock_acquire(&s_platform_mutexlock[core_id]);
        if (s_platform[core_id]) {
            free(s_platform[core_id]);
            s_platform[core_id] = NULL;
#if !SOC_DEDIC_PERIPH_AUTO_ENABLE
            // NOTE(review): the module clock is shared between cores; disabling it here
            // while the other core's platform is still alive looks suspicious — confirm
            // whether periph_module_disable is reference-counted on this target.
            periph_module_disable(dedic_gpio_periph_signals.module); // disable module if no GPIO channel is being used
#endif // !SOC_DEDIC_PERIPH_AUTO_ENABLE
        }
        _lock_release(&s_platform_mutexlock[core_id]);
    }
}
  102. #if SOC_DEDIC_GPIO_HAS_INTERRUPT
/**
 * @brief Default ISR shared by all input channels of one core's platform.
 *
 * Reads and clears the interrupt status under the platform spinlock, then
 * dispatches each pending channel's user callback. If any callback returns
 * true, a context switch is requested at the end of the ISR.
 *
 * @param arg Pointer to the owning dedic_gpio_platform_t
 */
static void dedic_gpio_default_isr(void *arg)
{
    bool need_yield = false;
    dedic_gpio_platform_t *platform = (dedic_gpio_platform_t *)arg;
    // get and clear interrupt status atomically, so no edge is lost or double-handled
    portENTER_CRITICAL_ISR(&platform->spinlock);
    uint32_t status = dedic_gpio_ll_get_interrupt_status(platform->dev);
    dedic_gpio_ll_clear_interrupt_status(platform->dev, status);
    portEXIT_CRITICAL_ISR(&platform->spinlock);
    // handle dedicated channel one by one
    while (status) {
        uint32_t channel = __builtin_ffs(status) - 1; // get dedicated channel number which triggered the interrupt
        if (platform->cbs[channel]) {
            // callback receives the owning bundle and the channel index relative to that bundle
            if (platform->cbs[channel](platform->in_bundles[channel], channel - platform->in_bundles[channel]->in_offset, platform->cb_args[channel])) {
                need_yield = true; // note that we need to yield at the end of isr
            }
        }
        status = status & (status - 1); // clear the right most bit '1'
    }
    if (need_yield) {
        portYIELD_FROM_ISR();
    }
}
  126. static esp_err_t dedic_gpio_install_interrupt(uint32_t core_id)
  127. {
  128. esp_err_t ret = ESP_OK;
  129. if (!s_platform[core_id]->intr_hdl) {
  130. // prevent install interrupt concurrently
  131. _lock_acquire(&s_platform_mutexlock[core_id]);
  132. if (!s_platform[core_id]->intr_hdl) {
  133. int isr_flags = 0;
  134. ret = esp_intr_alloc(dedic_gpio_periph_signals.irq, isr_flags, dedic_gpio_default_isr, s_platform[core_id], &s_platform[core_id]->intr_hdl);
  135. // clear pending interrupt
  136. uint32_t status = dedic_gpio_ll_get_interrupt_status(s_platform[core_id]->dev);
  137. dedic_gpio_ll_clear_interrupt_status(s_platform[core_id]->dev, status);
  138. }
  139. _lock_release(&s_platform_mutexlock[core_id]);
  140. ESP_GOTO_ON_ERROR(ret, err, TAG, "alloc interrupt failed");
  141. }
  142. err:
  143. return ret;
  144. }
/**
 * @brief Free the shared interrupt of the given core's platform.
 *
 * Mirror of dedic_gpio_install_interrupt(): frees the handle under the
 * per-core mutex and masks every channel interrupt afterwards.
 *
 * @param core_id CPU core whose platform interrupt is released
 */
static void dedic_gpio_uninstall_interrupt(uint32_t core_id)
{
    if (s_platform[core_id]->intr_hdl) {
        // prevent uninstall interrupt concurrently
        _lock_acquire(&s_platform_mutexlock[core_id]);
        if (s_platform[core_id]->intr_hdl) {
            esp_intr_free(s_platform[core_id]->intr_hdl);
            s_platform[core_id]->intr_hdl = NULL;
            // disable all interrupt
            dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, ~0UL, false);
        }
        _lock_release(&s_platform_mutexlock[core_id]);
    }
}
  159. static void dedic_gpio_set_interrupt(uint32_t core_id, uint32_t channel, dedic_gpio_intr_type_t type)
  160. {
  161. dedic_gpio_ll_set_interrupt_type(s_platform[core_id]->dev, channel, type);
  162. if (type != DEDIC_GPIO_INTR_NONE) {
  163. dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, 1 << channel, true);
  164. } else {
  165. dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, 1 << channel, false);
  166. }
  167. }
  168. #endif // SOC_DEDIC_GPIO_HAS_INTERRUPT
/**
 * @brief Create a new GPIO bundle on the calling CPU core.
 *
 * Allocates a contiguous run of free out/in channels (as requested by
 * config->flags), routes the user GPIOs through the GPIO matrix to the
 * dedicated GPIO signals, and returns the bundle handle. On any failure the
 * reserved channel masks are rolled back and the allocation is freed.
 *
 * @param config     Bundle configuration (GPIO list, direction flags, inversion)
 * @param ret_bundle Out parameter receiving the new bundle handle
 * @return ESP_OK, ESP_ERR_INVALID_ARG, ESP_ERR_NO_MEM, or ESP_ERR_NOT_FOUND
 *         when no contiguous free channel run exists
 */
esp_err_t dedic_gpio_new_bundle(const dedic_gpio_bundle_config_t *config, dedic_gpio_bundle_handle_t *ret_bundle)
{
    esp_err_t ret = ESP_OK;
    dedic_gpio_bundle_t *bundle = NULL;
    uint32_t out_mask = 0;
    uint32_t in_mask = 0;
    uint32_t core_id = cpu_hal_get_core_id(); // dedicated GPIO will be binded to the CPU who invokes this API
    ESP_GOTO_ON_FALSE(config && ret_bundle, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    ESP_GOTO_ON_FALSE(config->gpio_array && config->array_size > 0, ESP_ERR_INVALID_ARG, err, TAG, "invalid GPIO array or size");
    ESP_GOTO_ON_FALSE(config->flags.in_en || config->flags.out_en, ESP_ERR_INVALID_ARG, err, TAG, "no input/output mode specified");
    // lazy install s_platform[core_id]
    ESP_GOTO_ON_ERROR(dedic_gpio_build_platform(core_id), err, TAG, "build platform %d failed", core_id);
    // bundle struct carries a flexible array member for the GPIO list
    size_t bundle_size = sizeof(dedic_gpio_bundle_t) + config->array_size * sizeof(config->gpio_array[0]);
    bundle = calloc(1, bundle_size);
    ESP_GOTO_ON_FALSE(bundle, ESP_ERR_NO_MEM, err, TAG, "no mem for bundle");
    // for performance reasons, we only search for continuous channels
    // NOTE(review): assumes array_size < 32, otherwise "1 << array_size" is UB —
    // the per-direction checks below bound it, but only after this point; confirm
    // channel counts on all targets stay below 32.
    uint32_t pattern = (1 << config->array_size) - 1;
    // configure outwards channels
    uint32_t out_offset = 0;
    if (config->flags.out_en) {
        ESP_GOTO_ON_FALSE(config->array_size <= SOC_DEDIC_GPIO_OUT_CHANNELS_NUM, ESP_ERR_INVALID_ARG, err, TAG,
                          "array size(%d) exceeds maximum supported out channels(%d)", config->array_size, SOC_DEDIC_GPIO_OUT_CHANNELS_NUM);
        // prevent install bundle concurrently
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        // first-fit search for a contiguous free run of output channels
        for (size_t i = 0; i <= SOC_DEDIC_GPIO_OUT_CHANNELS_NUM - config->array_size; i++) {
            if ((s_platform[core_id]->out_occupied_mask & (pattern << i)) == 0) {
                out_mask = pattern << i;
                out_offset = i;
                break;
            }
        }
        if (out_mask) {
            s_platform[core_id]->out_occupied_mask |= out_mask;
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
            // always enable instruction to access output GPIO, which has better performance than register access
            dedic_gpio_ll_enable_instruction_access_out(s_platform[core_id]->dev, out_mask, true);
#endif
        }
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
        ESP_GOTO_ON_FALSE(out_mask, ESP_ERR_NOT_FOUND, err, TAG, "no free outward channels on core[%d]", core_id);
        ESP_LOGD(TAG, "new outward bundle(%p) on core[%d], offset=%d, mask(%x)", bundle, core_id, out_offset, out_mask);
    }
    // configure inwards channels
    uint32_t in_offset = 0;
    if (config->flags.in_en) {
        ESP_GOTO_ON_FALSE(config->array_size <= SOC_DEDIC_GPIO_IN_CHANNELS_NUM, ESP_ERR_INVALID_ARG, err, TAG,
                          "array size(%d) exceeds maximum supported in channels(%d)", config->array_size, SOC_DEDIC_GPIO_IN_CHANNELS_NUM);
        // prevent install bundle concurrently
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        // first-fit search for a contiguous free run of input channels
        for (size_t i = 0; i <= SOC_DEDIC_GPIO_IN_CHANNELS_NUM - config->array_size; i++) {
            if ((s_platform[core_id]->in_occupied_mask & (pattern << i)) == 0) {
                in_mask = pattern << i;
                in_offset = i;
                break;
            }
        }
        if (in_mask) {
            s_platform[core_id]->in_occupied_mask |= in_mask;
        }
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
        ESP_GOTO_ON_FALSE(in_mask, ESP_ERR_NOT_FOUND, err, TAG, "no free inward channels on core[%d]", core_id);
        ESP_LOGD(TAG, "new inward bundle(%p) on core[%d], offset=%d, mask(%x)", bundle, core_id, in_offset, in_mask);
    }
    // route dedicated GPIO channel signals to GPIO matrix
    if (config->flags.in_en) {
        for (size_t i = 0; i < config->array_size; i++) {
            gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[config->gpio_array[i]], PIN_FUNC_GPIO);
            esp_rom_gpio_connect_in_signal(config->gpio_array[i], dedic_gpio_periph_signals.cores[core_id].in_sig_per_channel[in_offset + i], config->flags.in_invert);
        }
    }
    if (config->flags.out_en) {
        for (size_t i = 0; i < config->array_size; i++) {
            gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[config->gpio_array[i]], PIN_FUNC_GPIO);
            esp_rom_gpio_connect_out_signal(config->gpio_array[i], dedic_gpio_periph_signals.cores[core_id].out_sig_per_channel[out_offset + i], config->flags.out_invert, false);
        }
#if !SOC_DEDIC_GPIO_OUT_AUTO_ENABLE
        // on targets without auto-enable, output must be switched on via the CPU
        cpu_ll_enable_dedic_gpio_output(s_platform[core_id]->out_occupied_mask);
#endif // !SOC_DEDIC_GPIO_OUT_AUTO_ENABLE
    }
    // it's safe to initialize bundle members without locks here
    bundle->core_id = core_id;
    bundle->out_mask = out_mask;
    bundle->in_mask = in_mask;
    bundle->out_offset = out_offset;
    bundle->in_offset = in_offset;
    bundle->nr_gpio = config->array_size;
    memcpy(bundle->gpio_array, config->gpio_array, config->array_size * sizeof(config->gpio_array[0]));
    *ret_bundle = bundle; // return bundle instance
    return ESP_OK;
err:
    // roll back any channel reservations made before the failure
    if (s_platform[core_id] && (out_mask || in_mask)) {
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        s_platform[core_id]->out_occupied_mask &= ~out_mask;
        s_platform[core_id]->in_occupied_mask &= ~in_mask;
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
    }
    if (bundle) {
        free(bundle);
    }
    return ret;
}
  270. esp_err_t dedic_gpio_del_bundle(dedic_gpio_bundle_handle_t bundle)
  271. {
  272. esp_err_t ret = ESP_OK;
  273. bool recycle_all = false;
  274. ESP_GOTO_ON_FALSE(bundle, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
  275. uint32_t core_id = cpu_hal_get_core_id();
  276. ESP_GOTO_ON_FALSE(core_id == bundle->core_id, ESP_FAIL, err, TAG, "del bundle on wrong CPU");
  277. portENTER_CRITICAL(&s_platform[core_id]->spinlock);
  278. s_platform[core_id]->out_occupied_mask &= ~(bundle->out_mask);
  279. s_platform[core_id]->in_occupied_mask &= ~(bundle->in_mask);
  280. if (!s_platform[core_id]->in_occupied_mask && !s_platform[core_id]->out_occupied_mask) {
  281. recycle_all = true;
  282. }
  283. portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
  284. free(bundle);
  285. if (recycle_all) {
  286. #if SOC_DEDIC_GPIO_HAS_INTERRUPT
  287. dedic_gpio_uninstall_interrupt(core_id);
  288. #endif
  289. dedic_gpio_break_platform(core_id);
  290. }
  291. err:
  292. return ret;
  293. }
  294. esp_err_t dedic_gpio_get_out_mask(dedic_gpio_bundle_handle_t bundle, uint32_t *mask)
  295. {
  296. esp_err_t ret = ESP_OK;
  297. ESP_GOTO_ON_FALSE(bundle && mask, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
  298. *mask = bundle->out_mask;
  299. err:
  300. return ret;
  301. }
  302. esp_err_t dedic_gpio_get_in_mask(dedic_gpio_bundle_handle_t bundle, uint32_t *mask)
  303. {
  304. esp_err_t ret = ESP_OK;
  305. ESP_GOTO_ON_FALSE(bundle && mask, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
  306. *mask = bundle->in_mask;
  307. err:
  308. return ret;
  309. }
  310. void dedic_gpio_bundle_write(dedic_gpio_bundle_handle_t bundle, uint32_t mask, uint32_t value)
  311. {
  312. // For performance reasons, we don't want to check the validation of parameters here
  313. // Even didn't check if we're working on the correct CPU core (i.e. bundle->core_id == current core_id)
  314. cpu_ll_write_dedic_gpio_mask(bundle->out_mask & (mask << bundle->out_offset), value << bundle->out_offset);
  315. }
  316. uint32_t dedic_gpio_bundle_read_out(dedic_gpio_bundle_handle_t bundle)
  317. {
  318. // For performance reasons, we don't want to check the validation of parameters here
  319. // Even didn't check if we're working on the correct CPU core (i.e. bundle->core_id == current core_id)
  320. uint32_t value = cpu_ll_read_dedic_gpio_out();
  321. return (value & bundle->out_mask) >> (bundle->out_offset);
  322. }
  323. uint32_t dedic_gpio_bundle_read_in(dedic_gpio_bundle_handle_t bundle)
  324. {
  325. // For performance reasons, we don't want to check the validation of parameters here
  326. // Even didn't check if we're working on the correct CPU core (i.e. bundle->core_id == current core_id)
  327. uint32_t value = cpu_ll_read_dedic_gpio_in();
  328. return (value & bundle->in_mask) >> (bundle->in_offset);
  329. }
  330. #if SOC_DEDIC_GPIO_HAS_INTERRUPT
  331. esp_err_t dedic_gpio_bundle_set_interrupt_and_callback(dedic_gpio_bundle_handle_t bundle, uint32_t mask, dedic_gpio_intr_type_t intr_type, dedic_gpio_isr_callback_t cb_isr, void *cb_args)
  332. {
  333. esp_err_t ret = ESP_OK;
  334. ESP_GOTO_ON_FALSE(bundle, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
  335. uint32_t core_id = cpu_hal_get_core_id();
  336. // lazy alloc interrupt
  337. ESP_GOTO_ON_ERROR(dedic_gpio_install_interrupt(core_id), err, TAG, "allocate interrupt on core %d failed", core_id);
  338. uint32_t channel_mask = bundle->in_mask & (mask << bundle->in_offset);
  339. uint32_t channel = 0;
  340. while (channel_mask) {
  341. channel = __builtin_ffs(channel_mask) - 1;
  342. portENTER_CRITICAL(&s_platform[core_id]->spinlock);
  343. dedic_gpio_set_interrupt(core_id, channel, intr_type);
  344. portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
  345. s_platform[core_id]->cbs[channel] = cb_isr;
  346. s_platform[core_id]->cb_args[channel] = cb_args;
  347. s_platform[core_id]->in_bundles[channel] = bundle;
  348. channel_mask = channel_mask & (channel_mask - 1); // clear the right most bit '1'
  349. }
  350. err:
  351. return ret;
  352. }
  353. #endif // SOC_DEDIC_GPIO_HAS_INTERRUPT