// dedic_gpio.c — ESP-IDF dedicated GPIO driver
  1. // Copyright 2020 Espressif Systems (Shanghai) PTE LTD
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. // #define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
  15. #include <stdlib.h>
  16. #include <string.h>
  17. #include <sys/lock.h>
  18. #include "sdkconfig.h"
  19. #include "esp_compiler.h"
  20. #include "esp_heap_caps.h"
  21. #include "esp_intr_alloc.h"
  22. #include "esp_log.h"
  23. #include "soc/soc_caps.h"
  24. #include "soc/gpio_periph.h"
  25. #include "soc/io_mux_reg.h"
  26. #include "hal/cpu_hal.h"
  27. #include "hal/cpu_ll.h"
  28. #include "hal/gpio_hal.h"
  29. #include "driver/periph_ctrl.h"
  30. #include "esp_rom_gpio.h"
  31. #include "freertos/FreeRTOS.h"
  32. #include "driver/dedic_gpio.h"
  33. #include "soc/dedic_gpio_periph.h"
  34. #if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
  35. #include "soc/dedic_gpio_struct.h"
  36. #include "hal/dedic_gpio_ll.h"
  37. #endif
  38. static const char *TAG = "dedic_gpio";
// Error-check helper used throughout this file.
// On failure of condition `a`: logs `msg` (printf-style, with __VA_ARGS__) at
// error level, stores `ret` into the enclosing function's local `ret_code`,
// and jumps to label `tag`.
// NOTE: every caller MUST declare `esp_err_t ret_code` and define the `tag`
// label (goto-cleanup pattern).
#define DEDIC_CHECK(a, msg, tag, ret, ...) \
    do { \
        if (unlikely(!(a))) { \
            ESP_LOGE(TAG, "%s(%d): " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
            ret_code = ret; \
            goto tag; \
        } \
    } while (0)
// Forward typedefs for the opaque driver objects defined below
typedef struct dedic_gpio_platform_t dedic_gpio_platform_t;
typedef struct dedic_gpio_bundle_t dedic_gpio_bundle_t;
// Dedicated GPIO driver platform, GPIO bundles will be installed onto it
// (one instance per CPU core, allocated lazily by dedic_gpio_build_platform)
static dedic_gpio_platform_t *s_platform[SOC_CPU_CORES_NUM];
// platform level mutex lock (guards creation/destruction of s_platform[core])
static _lock_t s_platform_mutexlock[SOC_CPU_CORES_NUM];
// Per-core driver state: tracks which dedicated GPIO channels are occupied
// and (where supported) the interrupt handle and per-channel callbacks.
struct dedic_gpio_platform_t {
    portMUX_TYPE spinlock;      // Spinlock, stop GPIO channels from accessing common resource concurrently
    uint32_t out_occupied_mask; // mask of output channels that already occupied
    uint32_t in_occupied_mask;  // mask of input channels that already occupied
#if SOC_DEDIC_GPIO_HAS_INTERRUPT
    intr_handle_t intr_hdl;     // interrupt handle (lazily installed, shared by all input channels)
    dedic_gpio_isr_callback_t cbs[SOC_DEDIC_GPIO_IN_CHANNELS_NUM]; // array of callback function for input channel
    void *cb_args[SOC_DEDIC_GPIO_IN_CHANNELS_NUM];                 // array of callback arguments for input channel
    dedic_gpio_bundle_t *in_bundles[SOC_DEDIC_GPIO_IN_CHANNELS_NUM]; // which bundle belongs to for input channel
#endif
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
    dedic_dev_t *dev;           // register base for LL access
#endif
};
  67. struct dedic_gpio_bundle_t {
  68. uint32_t core_id; // CPU core ID, a GPIO bundle must be installed to a specific CPU core
  69. uint32_t out_mask; // mask of output channels in the bank
  70. uint32_t in_mask; // mask of input channels in the bank
  71. uint32_t out_offset; // offset in the bank (seen from output channel)
  72. uint32_t in_offset; // offset in the bank (seen from input channel)
  73. size_t nr_gpio; // number of GPIOs in the gpio_array
  74. int gpio_array[0]; // array of GPIO numbers (configured by user)
  75. };
  76. static esp_err_t dedic_gpio_build_platform(uint32_t core_id)
  77. {
  78. esp_err_t ret_code = ESP_OK;
  79. if (!s_platform[core_id]) {
  80. // prevent building platform concurrently
  81. _lock_acquire(&s_platform_mutexlock[core_id]);
  82. if (!s_platform[core_id]) {
  83. s_platform[core_id] = calloc(1, sizeof(dedic_gpio_platform_t));
  84. if (s_platform[core_id]) {
  85. // initialize platfrom members
  86. s_platform[core_id]->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
  87. #if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
  88. s_platform[core_id]->dev = &DEDIC_GPIO;
  89. #endif
  90. periph_module_enable(dedic_gpio_periph_signals.module); // enable APB clock to peripheral
  91. }
  92. }
  93. _lock_release(&s_platform_mutexlock[core_id]);
  94. DEDIC_CHECK(s_platform[core_id], "no mem for s_platform[%d]", err, ESP_ERR_NO_MEM, core_id);
  95. ESP_LOGD(TAG, "build platform on core[%d] at %p", core_id, s_platform);
  96. }
  97. err:
  98. return ret_code;
  99. }
  100. static void dedic_gpio_break_platform(uint32_t core_id)
  101. {
  102. if (s_platform[core_id]) {
  103. // prevent breaking platform concurrently
  104. _lock_acquire(&s_platform_mutexlock[core_id]);
  105. if (s_platform[core_id]) {
  106. free(s_platform[core_id]);
  107. s_platform[core_id] = NULL;
  108. periph_module_disable(dedic_gpio_periph_signals.module); // disable module if no GPIO channel is being used
  109. }
  110. _lock_release(&s_platform_mutexlock[core_id]);
  111. }
  112. }
  113. #if SOC_DEDIC_GPIO_HAS_INTERRUPT
// Default ISR for dedicated GPIO input interrupts.
// Takes one snapshot of the pending-interrupt status (and clears it under the
// ISR spinlock), then dispatches each pending channel to its registered
// callback; requests a context switch at the end if any callback asked for it.
static void dedic_gpio_default_isr(void *arg)
{
    bool need_yield = false;
    dedic_gpio_platform_t *platform = (dedic_gpio_platform_t *)arg;
    // get and clear interrupt status
    portENTER_CRITICAL_ISR(&platform->spinlock);
    uint32_t status = dedic_gpio_ll_get_interrupt_status(platform->dev);
    dedic_gpio_ll_clear_interrupt_status(platform->dev, status);
    portEXIT_CRITICAL_ISR(&platform->spinlock);
    // handle dedicated channel one by one
    while (status) {
        uint32_t channel = __builtin_ffs(status) - 1; // get dedicated channel number which triggered the interrupt
        if (platform->cbs[channel]) {
            // callback receives the channel index relative to its own bundle
            if (platform->cbs[channel](platform->in_bundles[channel], channel - platform->in_bundles[channel]->in_offset, platform->cb_args[channel])) {
                need_yield = true; // note that we need to yield at the end of isr
            }
        }
        status = status & (status - 1); // clear the right most bit '1'
    }
    if (need_yield) {
        portYIELD_FROM_ISR();
    }
}
  137. static esp_err_t dedic_gpio_install_interrupt(uint32_t core_id)
  138. {
  139. esp_err_t ret_code = ESP_OK;
  140. if (!s_platform[core_id]->intr_hdl) {
  141. // prevent install interrupt concurrently
  142. _lock_acquire(&s_platform_mutexlock[core_id]);
  143. if (!s_platform[core_id]->intr_hdl) {
  144. int isr_flags = 0;
  145. ret_code = esp_intr_alloc(dedic_gpio_periph_signals.irq, isr_flags, dedic_gpio_default_isr, s_platform[core_id], &s_platform[core_id]->intr_hdl);
  146. // clear pending interrupt
  147. uint32_t status = dedic_gpio_ll_get_interrupt_status(s_platform[core_id]->dev);
  148. dedic_gpio_ll_clear_interrupt_status(s_platform[core_id]->dev, status);
  149. }
  150. _lock_release(&s_platform_mutexlock[core_id]);
  151. DEDIC_CHECK(ret_code == ESP_OK, "alloc interrupt failed", err, ret_code);
  152. }
  153. err:
  154. return ret_code;
  155. }
  156. static void dedic_gpio_uninstall_interrupt(uint32_t core_id)
  157. {
  158. if (s_platform[core_id]->intr_hdl) {
  159. // prevent uninstall interrupt concurrently
  160. _lock_acquire(&s_platform_mutexlock[core_id]);
  161. if (s_platform[core_id]->intr_hdl) {
  162. esp_intr_free(s_platform[core_id]->intr_hdl);
  163. s_platform[core_id]->intr_hdl = NULL;
  164. // disable all interrupt
  165. dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, ~0UL, false);
  166. }
  167. _lock_release(&s_platform_mutexlock[core_id]);
  168. }
  169. }
  170. static void dedic_gpio_set_interrupt(uint32_t core_id, uint32_t channel, dedic_gpio_intr_type_t type)
  171. {
  172. dedic_gpio_ll_set_interrupt_type(s_platform[core_id]->dev, channel, type);
  173. if (type != DEDIC_GPIO_INTR_NONE) {
  174. dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, 1 << channel, true);
  175. } else {
  176. dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, 1 << channel, false);
  177. }
  178. }
  179. #endif // SOC_DEDIC_GPIO_HAS_INTERRUPT
/**
 * Create a GPIO bundle on the calling CPU core.
 *
 * Lazily builds the per-core platform, searches for a *continuous* range of
 * free dedicated channels (outward and/or inward, per config->flags), routes
 * the user's GPIOs through the IO-MUX / GPIO matrix, and returns a
 * heap-allocated bundle via ret_bundle.
 *
 * Returns ESP_OK on success; ESP_ERR_INVALID_ARG, ESP_ERR_NO_MEM,
 * ESP_ERR_NOT_FOUND or ESP_FAIL on failure. On failure, any channels claimed
 * along the way are released again in the err: cleanup path.
 */
esp_err_t dedic_gpio_new_bundle(const dedic_gpio_bundle_config_t *config, dedic_gpio_bundle_handle_t *ret_bundle)
{
    esp_err_t ret_code = ESP_OK;
    dedic_gpio_bundle_t *bundle = NULL;
    uint32_t out_mask = 0;
    uint32_t in_mask = 0;
    uint32_t core_id = cpu_hal_get_core_id(); // dedicated GPIO will be binded to the CPU who invokes this API
    DEDIC_CHECK(config && ret_bundle, "invalid argument", err, ESP_ERR_INVALID_ARG);
    DEDIC_CHECK(config->gpio_array && config->array_size > 0, "invalid GPIO array or size", err, ESP_ERR_INVALID_ARG);
    DEDIC_CHECK(config->flags.in_en || config->flags.out_en, "no input/output mode specified", err, ESP_ERR_INVALID_ARG);
    // lazy install s_platform[core_id]
    DEDIC_CHECK(dedic_gpio_build_platform(core_id) == ESP_OK, "build platform %d failed", err, ESP_FAIL, core_id);
    // the bundle struct ends in a trailing variable-length array, so allocate
    // extra room for the array_size GPIO numbers
    size_t bundle_size = sizeof(dedic_gpio_bundle_t) + config->array_size * sizeof(config->gpio_array[0]);
    bundle = calloc(1, bundle_size);
    DEDIC_CHECK(bundle, "no mem for bundle", err, ESP_ERR_NO_MEM);
    // for performance reasons, we only search for continuous channels
    uint32_t pattern = (1 << config->array_size) - 1;
    // configure outwards channels
    uint32_t out_offset = 0;
    if (config->flags.out_en) {
        DEDIC_CHECK(SOC_DEDIC_GPIO_OUT_CHANNELS_NUM >= config->array_size, "array size(%d) exceeds maximum supported out channels(%d)",
                    err, ESP_ERR_INVALID_ARG, config->array_size, SOC_DEDIC_GPIO_OUT_CHANNELS_NUM);
        // prevent install bundle concurrently
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        // slide the all-ones pattern over the occupancy mask until it fits
        for (size_t i = 0; i <= SOC_DEDIC_GPIO_OUT_CHANNELS_NUM - config->array_size; i++) {
            if ((s_platform[core_id]->out_occupied_mask & (pattern << i)) == 0) {
                out_mask = pattern << i;
                out_offset = i;
                break;
            }
        }
        if (out_mask) {
            s_platform[core_id]->out_occupied_mask |= out_mask;
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
            // always enable instruction to access output GPIO, which has better performance than register access
            dedic_gpio_ll_enable_instruction_access_out(s_platform[core_id]->dev, out_mask, true);
#endif
        }
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
        DEDIC_CHECK(out_mask, "no free outward channels on core[%d]", err, ESP_ERR_NOT_FOUND, core_id);
        ESP_LOGD(TAG, "new outward bundle(%p) on core[%d], offset=%d, mask(%x)", bundle, core_id, out_offset, out_mask);
    }
    // configure inwards channels
    uint32_t in_offset = 0;
    if (config->flags.in_en) {
        DEDIC_CHECK(SOC_DEDIC_GPIO_IN_CHANNELS_NUM >= config->array_size, "array size(%d) exceeds maximum supported in channels(%d)",
                    err, ESP_ERR_INVALID_ARG, config->array_size, SOC_DEDIC_GPIO_IN_CHANNELS_NUM);
        // prevent install bundle concurrently
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        // same continuous-range search as for the outward direction
        for (size_t i = 0; i <= SOC_DEDIC_GPIO_IN_CHANNELS_NUM - config->array_size; i++) {
            if ((s_platform[core_id]->in_occupied_mask & (pattern << i)) == 0) {
                in_mask = pattern << i;
                in_offset = i;
                break;
            }
        }
        if (in_mask) {
            s_platform[core_id]->in_occupied_mask |= in_mask;
        }
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
        DEDIC_CHECK(in_mask, "no free inward channels on core[%d]", err, ESP_ERR_NOT_FOUND, core_id);
        ESP_LOGD(TAG, "new inward bundle(%p) on core[%d], offset=%d, mask(%x)", bundle, core_id, in_offset, in_mask);
    }
    // route dedicated GPIO channel signals to GPIO matrix
    if (config->flags.in_en) {
        for (size_t i = 0; i < config->array_size; i++) {
            gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[config->gpio_array[i]], PIN_FUNC_GPIO);
            esp_rom_gpio_connect_in_signal(config->gpio_array[i], dedic_gpio_periph_signals.cores[core_id].in_sig_per_channel[in_offset + i], config->flags.in_invert);
        }
    }
    if (config->flags.out_en) {
        for (size_t i = 0; i < config->array_size; i++) {
            gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[config->gpio_array[i]], PIN_FUNC_GPIO);
            esp_rom_gpio_connect_out_signal(config->gpio_array[i], dedic_gpio_periph_signals.cores[core_id].out_sig_per_channel[out_offset + i], config->flags.out_invert, false);
        }
    }
    // it's safe to initialize bundle members without locks here
    bundle->core_id = core_id;
    bundle->out_mask = out_mask;
    bundle->in_mask = in_mask;
    bundle->out_offset = out_offset;
    bundle->in_offset = in_offset;
    bundle->nr_gpio = config->array_size;
    memcpy(bundle->gpio_array, config->gpio_array, config->array_size * sizeof(config->gpio_array[0]));
    *ret_bundle = bundle; // return bundle instance
    return ESP_OK;
err:
    // roll back: release any channels this call managed to claim
    if (s_platform[core_id] && (out_mask || in_mask)) {
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        s_platform[core_id]->out_occupied_mask &= ~out_mask;
        s_platform[core_id]->in_occupied_mask &= ~in_mask;
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
    }
    if (bundle) {
        free(bundle);
    }
    return ret_code;
}
/**
 * Delete a GPIO bundle and return its channels to the platform.
 * Must be called on the same core the bundle was created on. When the last
 * bundle on a core is deleted, the shared interrupt (if any) and the whole
 * per-core platform are torn down as well.
 */
esp_err_t dedic_gpio_del_bundle(dedic_gpio_bundle_handle_t bundle)
{
    esp_err_t ret_code = ESP_OK;
    bool recycle_all = false;
    DEDIC_CHECK(bundle, "invalid argument", err, ESP_ERR_INVALID_ARG);
    uint32_t core_id = cpu_hal_get_core_id();
    DEDIC_CHECK(core_id == bundle->core_id, "del bundle on wrong CPU", err, ESP_FAIL);
    // release this bundle's channels back to the per-core occupancy masks
    portENTER_CRITICAL(&s_platform[core_id]->spinlock);
    s_platform[core_id]->out_occupied_mask &= ~(bundle->out_mask);
    s_platform[core_id]->in_occupied_mask &= ~(bundle->in_mask);
    if (!s_platform[core_id]->in_occupied_mask && !s_platform[core_id]->out_occupied_mask) {
        recycle_all = true; // no channel left in use -> tear down the whole platform
    }
    portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
    // NOTE(review): callbacks registered for this bundle's input channels are
    // not cleared here; they only disappear when recycle_all uninstalls the
    // interrupt. Verify no interrupt can stay enabled on a freed bundle's
    // channels while other bundles keep the platform alive.
    free(bundle);
    if (recycle_all) {
#if SOC_DEDIC_GPIO_HAS_INTERRUPT
        dedic_gpio_uninstall_interrupt(core_id);
#endif
        dedic_gpio_break_platform(core_id);
    }
err:
    return ret_code;
}
/**
 * Get the mask of dedicated output channels occupied by the bundle.
 * Returns ESP_OK, or ESP_ERR_INVALID_ARG if bundle or mask is NULL.
 */
esp_err_t dedic_gpio_get_out_mask(dedic_gpio_bundle_handle_t bundle, uint32_t *mask)
{
    esp_err_t ret_code = ESP_OK;
    DEDIC_CHECK(bundle && mask, "invalid argument", err, ESP_ERR_INVALID_ARG);
    *mask = bundle->out_mask;
err:
    return ret_code;
}
/**
 * Get the mask of dedicated input channels occupied by the bundle.
 * Returns ESP_OK, or ESP_ERR_INVALID_ARG if bundle or mask is NULL.
 */
esp_err_t dedic_gpio_get_in_mask(dedic_gpio_bundle_handle_t bundle, uint32_t *mask)
{
    esp_err_t ret_code = ESP_OK;
    DEDIC_CHECK(bundle && mask, "invalid argument", err, ESP_ERR_INVALID_ARG);
    *mask = bundle->in_mask;
err:
    return ret_code;
}
  318. void dedic_gpio_bundle_write(dedic_gpio_bundle_handle_t bundle, uint32_t mask, uint32_t value)
  319. {
  320. // For performace reasons, we don't want to check the validation of parameters here
  321. // Even didn't check if we're working on the correct CPU core (i.e. bundle->core_id == current core_id)
  322. cpu_ll_write_dedic_gpio_mask(bundle->out_mask & (mask << bundle->out_offset), value << bundle->out_offset);
  323. }
  324. uint32_t dedic_gpio_bundle_read_out(dedic_gpio_bundle_handle_t bundle)
  325. {
  326. // For performace reasons, we don't want to check the validation of parameters here
  327. // Even didn't check if we're working on the correct CPU core (i.e. bundle->core_id == current core_id)
  328. uint32_t value = cpu_ll_read_dedic_gpio_out();
  329. return (value & bundle->out_mask) >> (bundle->out_offset);
  330. }
  331. uint32_t dedic_gpio_bundle_read_in(dedic_gpio_bundle_handle_t bundle)
  332. {
  333. // For performace reasons, we don't want to check the validation of parameters here
  334. // Even didn't check if we're working on the correct CPU core (i.e. bundle->core_id == current core_id)
  335. uint32_t value = cpu_ll_read_dedic_gpio_in();
  336. return (value & bundle->in_mask) >> (bundle->in_offset);
  337. }
  338. #if SOC_DEDIC_GPIO_HAS_INTERRUPT
  339. esp_err_t dedic_gpio_bundle_set_interrupt_and_callback(dedic_gpio_bundle_handle_t bundle, uint32_t mask, dedic_gpio_intr_type_t intr_type, dedic_gpio_isr_callback_t cb_isr, void *cb_args)
  340. {
  341. esp_err_t ret_code = ESP_OK;
  342. DEDIC_CHECK(bundle, "invalid argument", err, ESP_ERR_INVALID_ARG);
  343. uint32_t core_id = cpu_hal_get_core_id();
  344. // lazy alloc interrupt
  345. DEDIC_CHECK(dedic_gpio_install_interrupt(core_id) == ESP_OK, "allocate interrupt on core %d failed", err, ESP_FAIL, core_id);
  346. uint32_t channel_mask = bundle->in_mask & (mask << bundle->in_offset);
  347. uint32_t channel = 0;
  348. while (channel_mask) {
  349. channel = __builtin_ffs(channel_mask) - 1;
  350. portENTER_CRITICAL(&s_platform[core_id]->spinlock);
  351. dedic_gpio_set_interrupt(core_id, channel, intr_type);
  352. portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
  353. s_platform[core_id]->cbs[channel] = cb_isr;
  354. s_platform[core_id]->cb_args[channel] = cb_args;
  355. s_platform[core_id]->in_bundles[channel] = bundle;
  356. channel_mask = channel_mask & (channel_mask - 1); // clear the right most bit '1'
  357. }
  358. err:
  359. return ret_code;
  360. }
  361. #endif // SOC_DEDIC_GPIO_HAS_INTERRUPT