/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include <stdatomic.h>
#include "sdkconfig.h"
#include "esp_private/spi_common_internal.h"
#include "esp_intr_alloc.h"
#include "soc/soc_caps.h"
#include "esp_log.h"
#include "esp_check.h"
#include <strings.h>
#include "esp_heap_caps.h"
/*
 * This lock is designed to solve the conflicts between SPI devices (used in tasks) and
 * the background operations (ISR or cache access).
 *
 * There are N (device/task) + 1 (BG) acquiring processor candidates that may touch the bus.
 *
 * The core of the lock is a `status` atomic variable, which is always available. No intermediate
 * status is allowed. The atomic operations (mainly `atomic_fetch_and`, `atomic_fetch_or`)
 * atomically read the status, and write back the status value OR-ed / AND-ed with given masks.
 *
 * Definitions of the status:
 * - [30]    WEAK_BG_FLAG, active when the BG is the cache
 * - [29:20] LOCK bits, active when the corresponding device is asking for acquiring
 * - [19:10] PENDING bits, active when the BG acknowledges the REQ bits, but hasn't fully handled them.
 * - [ 9: 0] REQ bits, active when the corresponding device is requesting BG operations.
 *
 * The REQ bits together with the PENDING bits are called the BG bits, which represent the actual
 * BG request state of the devices. Either one of REQ or PENDING being active indicates the device
 * has pending BG requests. The reason for having two bits instead of one is given in the appendix
 * below.
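 *
 * As a worked example (illustrative only, using the bit positions above and the `DEV_MASK`
 * macro defined below): a device registered in slot 2 owns REQ bit BIT(2), PENDING bit BIT(12)
 * and LOCK bit BIT(22), so `DEV_MASK(2)` evaluates to 0x00401004.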
 *
 * The acquiring processor is the current processor (task or ISR) allowed to touch the critical
 * resources, i.e. the SPI bus.
 *
 * States of the lock:
 * - STATE_IDLE: There's no acquiring processor. No device is acquiring the bus, and no BG
 *   operation is in progress.
 *
 * - STATE_ACQ: The acquiring processor is a device task. This means one of the devices is
 *   acquiring the bus.
 *
 * - STATE_BG: The acquiring processor is the ISR, and there is no acquiring device.
 *
 * - STATE_BG_ACQ: The acquiring processor is the ISR, and there is an acquiring device.
 *
 *
 * Whenever a bit is written to the status, it means a device (on a task) is trying to acquire
 * the lock (either for the task, or the ISR). When there are no LOCK bits or BG bits active, the
 * caller immediately becomes the acquiring processor. Otherwise, the task has to block, and the
 * ISR will not be invoked until scheduled by the current acquiring processor.
 *
 * The acquiring processor is responsible for assigning the next acquiring processor by calling
 * the scheduler, usually after it finishes some requests and clears the corresponding status
 * bits. There is one exception: when the last bit is cleared from the status, so that no other
 * LOCK bits or BG bits remain active, the acquiring processor loses its role immediately, and
 * doesn't need to call the scheduler to assign the next acquiring processor.
 *
 * The acquiring processor may also choose to assign a new acquiring device when there is none,
 * by calling `spi_bus_lock_bg_rotate_acq_dev` in the ISR. But the acquiring processor, in this
 * case, is still the ISR, until it calls the scheduler.
 *
 *
 * Transitions of the FSM:
 *
 * - STATE_IDLE: no acquiring device, nor acquiring processor, no LOCK or BG bits active
 *   -> STATE_BG:  by `req_core`
 *   -> STATE_ACQ: by `acquire_core`
 *
 * - STATE_BG:
 *   * No acquiring device, the ISR is the acquiring processor, there are BG bits active, but no
 *     LOCK bits
 *   * The BG operation should be enabled while turning into this state.
 *
 *   -> STATE_IDLE:   by `bg_exit_core` after `clear_pend_core` for all BG bits
 *   -> STATE_BG_ACQ: by `schedule_core`, when there is a new LOCK bit set (by `acquire_core`)
 *
 * - STATE_BG_ACQ:
 *   * There is an acquiring device, the ISR is the acquiring processor, and there may be BG bits
 *     active for the acquiring device.
 *   * The BG operation should be enabled while turning into this state.
 *
 *   -> STATE_ACQ: by `bg_exit_core` after `clear_pend_core` for all BG bits of the acquiring
 *      device.
 *
 *      Should not go to STATE_ACQ (unblock the acquiring task) until all requests of the
 *      acquiring device are finished. This is to preserve the sequence of foreground (polling)
 *      and background operations of the device. The background operations queued before the
 *      acquiring should be completed first.
 *
 * - STATE_ACQ:
 *   * There is an acquiring device, the task is the acquiring processor, and there are no BG
 *     bits active for the acquiring device.
 *   * The acquiring task (if blocked at `spi_bus_lock_acquire_start` or
 *     `spi_bus_lock_wait_bg_done`) should be resumed while turning into this state.
 *
 *   -> STATE_BG_ACQ: by `req_core`
 *   -> STATE_BG_ACQ (other device): by `acquire_end_core`, when there is a LOCK bit for another
 *      device, and the new acquiring device has active BG bits.
 *   -> STATE_ACQ (other device): by `acquire_end_core`, when there is a LOCK bit for another
 *      device, but the new acquiring device has no active BG bits.
 *   -> STATE_BG:   by `acquire_end_core`, when there is no LOCK bit active, but there are active
 *      BG bits.
 *   -> STATE_IDLE: by `acquire_end_core`, when there is no LOCK bit, nor BG bit, active.
 *
 * The `req_core` used in the task is a little special. It asks for the acquiring role on behalf
 * of the ISR. When it succeeds for the first time, it invokes the ISR (hence passing the
 * acquiring role to the BG). Otherwise it does not block; the ISR will be invoked automatically
 * by another acquiring processor. The caller of `req_core` never becomes the acquiring processor
 * through this function.
 *
 *
 * Appendix: The design of having both a request bit and a pending bit solves a concurrency
 * issue between the tasks and the BG: a task can queue several requests, but the request bit
 * cannot represent the number of requests queued.
 *
 * Here's how a task and the ISR work concurrently:
 * - Task: (a) Write to Queue -> (b) Write request bit
 *   The task has to write the request bit (b) after the data is prepared in the queue (a),
 *   otherwise the BG may fail to read from the queue when it sees the request bit set.
 *
 * - BG: (c) Read queue -> (d) Clear request bit
 *   Since the BG cannot know the number of requests queued, it has to repeatedly check the
 *   queue (c) until it finds it empty, and then clear the request bit (d).
 *
 * The events may happen in the order: (c) -> (a) -> (b) -> (d). This can cause a false clear of
 * the request bit: data is left prepared in the queue, while the request bit is inactive.
 *
 * A new step (e), moving the REQ bits to the PEND bits, is introduced to happen before (c) to
 * solve this problem. In this case (d) is changed to clear the PEND bit. Even with the order
 * (e) -> (c) -> (a) -> (b) -> (d), only the PEND bit is cleared, while the REQ bit stays active.
 */
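/*
 * A minimal usage sketch from a device's perspective (illustrative only; the real call sites
 * live in the SPI master driver, and `bus_lock` / `dev_cfg` here are hypothetical variables):
 *
 *     spi_bus_lock_dev_handle_t dev;
 *     ESP_ERROR_CHECK(spi_bus_lock_register_dev(bus_lock, &dev_cfg, &dev));
 *
 *     // Foreground (polling) path: become the acquiring processor and own the bus exclusively.
 *     spi_bus_lock_acquire_start(dev, portMAX_DELAY);
 *     // ... perform polling transactions on the SPI hardware directly ...
 *
 *     // A BG request may still be issued while acquiring: raise the REQ bit so the BG (ISR)
 *     // gets scheduled to serve it, then wait until all BG requests of this device are done
 *     // before doing more polling (valid only while `dev` is the acquiring device).
 *     spi_bus_lock_bg_request(dev);
 *     spi_bus_lock_wait_bg_done(dev, portMAX_DELAY);
 *
 *     spi_bus_lock_acquire_end(dev);
 */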
struct spi_bus_lock_dev_t;
typedef struct spi_bus_lock_dev_t spi_bus_lock_dev_t;

typedef struct spi_bus_lock_t spi_bus_lock_t;

#define MAX_DEV_NUM     10

// Bit 29-20: lock bits, Bit 19-10: pending bits
// Bit 9-0: request bits, Bit 30: WEAK_BG_FLAG
#define LOCK_SHIFT      20
#define PENDING_SHIFT   10
#define REQ_SHIFT       0

#define WEAK_BG_FLAG    BIT(30)    /**< The bus is permanently requested by background operations.
                                    * This flag is weak: it will not prevent devices from acquiring, but it helps the BG to be re-enabled again after the bus is released.
                                    */

// Get the bit mask where bits [high-1, low] are all 1'b1.
#define BIT1_MASK(high, low)    ((UINT32_MAX << (high)) ^ (UINT32_MAX << (low)))
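// For illustration: BIT1_MASK(4, 1) evaluates to 0b1110, i.e. bits [3:1] set.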
#define LOCK_BIT(mask)      ((mask) << LOCK_SHIFT)
#define REQUEST_BIT(mask)   ((mask) << REQ_SHIFT)
#define PENDING_BIT(mask)   ((mask) << PENDING_SHIFT)
#define DEV_MASK(id)        (LOCK_BIT(1 << id) | PENDING_BIT(1 << id) | REQUEST_BIT(1 << id))
#define ID_DEV_MASK(mask)   (__builtin_ffs(mask) - 1)

#define REQ_MASK    BIT1_MASK(REQ_SHIFT + MAX_DEV_NUM, REQ_SHIFT)
#define PEND_MASK   BIT1_MASK(PENDING_SHIFT + MAX_DEV_NUM, PENDING_SHIFT)
#define BG_MASK     BIT1_MASK(REQ_SHIFT + MAX_DEV_NUM * 2, REQ_SHIFT)
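// Note that BG_MASK covers both the REQ bits [9:0] and the PEND bits [19:10].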
#define LOCK_MASK   BIT1_MASK(LOCK_SHIFT + MAX_DEV_NUM, LOCK_SHIFT)

#define DEV_REQ_MASK(dev)   ((dev)->mask & REQ_MASK)
#define DEV_PEND_MASK(dev)  ((dev)->mask & PEND_MASK)
#define DEV_BG_MASK(dev)    ((dev)->mask & BG_MASK)
struct spi_bus_lock_t {
    /**
     * The core of the lock. These bits are the status of the lock, which should always be
     * available. No intermediate status is allowed. This is realized by atomic operations,
     * mainly `atomic_fetch_and` and `atomic_fetch_or`, which atomically read the status and
     * write back the value OR-ed / AND-ed with given masks.
     *
     * The request bits together with the pending bits represent the actual BG request state of
     * one device. Either one of them being active indicates the device has pending BG requests.
     *
     * Whenever a bit is written to the status, it means a device (on a task) is trying to
     * acquire the lock. This will succeed only when no LOCK or BG bits are active.
     *
     * The acquiring processor is responsible for calling the scheduler to pass its role to other
     * tasks or the BG, unless it clears the last bit in the status register.
     */
    //// Critical resources: only writable by the acquiring processor, and stable only when read by the acquiring processor.
    atomic_uint_fast32_t    status;
    spi_bus_lock_dev_t* volatile acquiring_dev;   ///< The acquiring device
    bool volatile acq_dev_bg_active;    ///< BG is the acquiring processor serving the acquiring device, used by wait_bg to skip waiting quickly.
    bool volatile in_isr;               ///< ISR is touching HW
    //// End of critical resources

    atomic_intptr_t     dev[DEV_NUM_MAX];   ///< Child locks.
    bg_ctrl_func_t      bg_enable;          ///< Function to enable background operations.
    bg_ctrl_func_t      bg_disable;         ///< Function to disable background operations.
    void*               bg_arg;             ///< Argument for the `bg_enable` and `bg_disable` functions.

    spi_bus_lock_dev_t* last_dev;           ///< Last used device, to decide whether to refresh all registers.
    int                 periph_cs_num;      ///< Number of CS pins the HW has.

    //debug information
    int                 host_id;            ///< Host ID, for debug information printing
    uint32_t            new_req;            ///< Last int_req when `spi_bus_lock_bg_start` is called. Debug use.
};

struct spi_bus_lock_dev_t {
    SemaphoreHandle_t   semphr;     ///< Binary semaphore to notify the device it claimed the bus
    spi_bus_lock_t*     parent;     ///< Pointer to the parent spi_bus_lock_t
    uint32_t            mask;       ///< Bitwise OR-ed mask of the REQ, PEND, LOCK bits of this device
};
/**
 * @note 1
 * This critical section is only used to fix the following race condition:
 *
 * Define: lock_bits = (lock->status & LOCK_MASK) >> LOCK_SHIFT. This `lock_bits` is Bit 29-20 of lock->status.
 *
 * 1. spi_hdl_1:
 *    acquire_end_core():
 *    uint32_t status = lock_status_clear(lock, dev_handle->mask & LOCK_MASK);
 *
 *    Because `spi_hdl_1` is the first (and only) locking device, after this, lock_bits == 0'b0 and status == 0.
 *
 * 2. spi_hdl_2:
 *    acquire_core:
 *    uint32_t status = lock_status_fetch_set(lock, dev_handle->mask & LOCK_MASK);
 *
 *    Here status is 0'b0, but lock_bits == 0'b10, because this is `spi_hdl_2`.
 *
 * 3. spi_hdl_2:
 *    `acquire_core` returns true, because status == 0. `spi_bus_lock_acquire_start(spi_hdl_2)` then won't block.
 *
 * 4. spi_hdl_2:
 *    spi_device_polling_end(spi_hdl_2).
 *
 * 5. spi_hdl_1:
 *    acquire_end_core:
 *    status is 0, so it clears lock->acquiring_dev.
 *
 * 6. spi_hdl_2:
 *    spi_device_polling_end:
 *    assert(handle == get_acquiring_dev(host)); fails.
 *
 * @note 2
 * Only use this critical section under this condition, and keep its scope as small as possible,
 * as `spi_bus_lock` heavily influences all the SPI buses (including MSPI)!
 */
portMUX_TYPE s_spinlock = portMUX_INITIALIZER_UNLOCKED;

DRAM_ATTR static const char TAG[] = "bus_lock";

static inline int mask_get_id(uint32_t mask);
static inline int dev_lock_get_id(spi_bus_lock_dev_t *dev_lock);
/*******************************************************************************
 * atomic operations to the status
 ******************************************************************************/
SPI_MASTER_ISR_ATTR static inline uint32_t lock_status_fetch_set(spi_bus_lock_t *lock, uint32_t set)
{
    return atomic_fetch_or(&lock->status, set);
}

IRAM_ATTR static inline uint32_t lock_status_fetch_clear(spi_bus_lock_t *lock, uint32_t clear)
{
    return atomic_fetch_and(&lock->status, ~clear);
}

IRAM_ATTR static inline uint32_t lock_status_fetch(spi_bus_lock_t *lock)
{
    return atomic_load(&lock->status);
}

SPI_MASTER_ISR_ATTR static inline void lock_status_init(spi_bus_lock_t *lock)
{
    atomic_store(&lock->status, 0);
}

// return the remaining status bits
IRAM_ATTR static inline uint32_t lock_status_clear(spi_bus_lock_t* lock, uint32_t clear)
{
    //the fetch and clear should be atomic, to avoid missing the all-'0' status when all bits are cleared.
    uint32_t state = lock_status_fetch_clear(lock, clear);
    return state & (~clear);
}
/*******************************************************************************
 * Schedule service
 *
 * The modification of the status bits may cause rotation of the acquiring processor. It also
 * affects the `acquiring_dev` (the acquiring device), `in_isr` (HW used in BG), and
 * `acq_dev_bg_active` (wait_bg_end can be skipped) members of the lock structure.
 *
 * Most of them should be atomic, and special attention should be paid to the operation
 * sequence.
 ******************************************************************************/
SPI_MASTER_ISR_ATTR static inline void resume_dev_in_isr(spi_bus_lock_dev_t *dev_lock, BaseType_t *do_yield)
{
    xSemaphoreGiveFromISR(dev_lock->semphr, do_yield);
}

IRAM_ATTR static inline void resume_dev(const spi_bus_lock_dev_t *dev_lock)
{
    xSemaphoreGive(dev_lock->semphr);
}

SPI_MASTER_ISR_ATTR static inline void bg_disable(spi_bus_lock_t *lock)
{
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->bg_disable);
    lock->bg_disable(lock->bg_arg);
}

IRAM_ATTR static inline void bg_enable(spi_bus_lock_t* lock)
{
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->bg_enable);
    lock->bg_enable(lock->bg_arg);
}
// Set the REQ bit. If we become the acquiring processor, invoke the ISR and pass the role to it.
// The caller will never become the acquiring processor after this function returns.
SPI_MASTER_ATTR static inline void req_core(spi_bus_lock_dev_t *dev_handle)
{
    spi_bus_lock_t *lock = dev_handle->parent;

    // Though `acquiring_dev` is a critical resource, `dev_handle == lock->acquiring_dev`
    // is a stable statement unless `acquire_start` or `acquire_end` is called by the current
    // device.
    if (dev_handle == lock->acquiring_dev) {
        // Set the REQ bit, and check the BG bits since we are the acquiring processor.
        // If the BG bits were not active before, invoke the BG again.
        // To avoid a race against `clear_pend_core`, `acq_dev_bg_active` should be set before
        // setting the REQ bit.
        lock->acq_dev_bg_active = true;
        uint32_t status = lock_status_fetch_set(lock, DEV_REQ_MASK(dev_handle));
        if ((status & DEV_BG_MASK(dev_handle)) == 0) {
            bg_enable(lock); //acquiring processor passed to BG
        }
    } else {
        uint32_t status = lock_status_fetch_set(lock, DEV_REQ_MASK(dev_handle));
        if (status == 0) {
            bg_enable(lock); //acquiring processor passed to BG
        }
    }
}
//Set the LOCK bit. Handle related stuff and return true if we become the acquiring processor.
SPI_MASTER_ISR_ATTR static inline bool acquire_core(spi_bus_lock_dev_t *dev_handle)
{
    spi_bus_lock_t* lock = dev_handle->parent;

    //For this critical section, search `@note 1` in this file for details
    portENTER_CRITICAL_SAFE(&s_spinlock);
    uint32_t status = lock_status_fetch_set(lock, dev_handle->mask & LOCK_MASK);
    portEXIT_CRITICAL_SAFE(&s_spinlock);

    // Check all bits except WEAK_BG
    if ((status & (BG_MASK | LOCK_MASK)) == 0) {
        //succeed at once
        lock->acquiring_dev = dev_handle;
        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
        if (status & WEAK_BG_FLAG) {
            //Mainly to disable the cache (weak BG), which is not able to disable itself
            bg_disable(lock);
        }
        return true;
    }
    return false;
}
/**
 * Find the next acquiring processor according to the status. Will directly change
 * the acquiring device if a new one is found.
 *
 * Cases:
 * - BG should still be the acquiring processor (return false):
 *     1. The acquiring device has active BG bits: out_desired_dev = the new acquiring device
 *     2. No acquiring device, but BG is active: out_desired_dev = a device picked among those with active BG bits
 * - BG should yield to the task (return true):
 *     3. The acquiring device has no active BG bits: out_desired_dev = the new acquiring device
 *     4. No acquiring device and no active BG bits: out_desired_dev = NULL
 *
 * The acquiring device's task needs to be resumed only in case 3.
 *
 * This scheduling can happen in either the task or the ISR, so `in_isr` and `bg_active` are not touched here.
 *
 * @param lock
 * @param status            Current status
 * @param out_desired_dev   Desired device to work next, see above.
 *
 * @return false if the BG should still be the acquiring processor, otherwise true (yield to the task).
 */
IRAM_ATTR static inline bool
schedule_core(spi_bus_lock_t *lock, uint32_t status, spi_bus_lock_dev_t **out_desired_dev)
{
    spi_bus_lock_dev_t* desired_dev = NULL;
    uint32_t lock_bits = (status & LOCK_MASK) >> LOCK_SHIFT;
    uint32_t bg_bits = status & BG_MASK;
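    // Fold the REQ and PEND bits (the BG bits) into one bit per device below: a device counts
    // as BG-active if either its REQ bit or its PEND bit is set.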
    bg_bits = ((bg_bits >> REQ_SHIFT) | (bg_bits >> PENDING_SHIFT)) & REQ_MASK;

    bool bg_yield;
    if (lock_bits) {
        int dev_id = mask_get_id(lock_bits);
        desired_dev = (spi_bus_lock_dev_t *)atomic_load(&lock->dev[dev_id]);
        BUS_LOCK_DEBUG_EXECUTE_CHECK(desired_dev);

        lock->acquiring_dev = desired_dev;
        bg_yield = ((bg_bits & desired_dev->mask) == 0);
        lock->acq_dev_bg_active = !bg_yield;
    } else {
        lock->acq_dev_bg_active = false;
        if (bg_bits) {
            int dev_id = mask_get_id(bg_bits);
            desired_dev = (spi_bus_lock_dev_t *)atomic_load(&lock->dev[dev_id]);
            BUS_LOCK_DEBUG_EXECUTE_CHECK(desired_dev);

            lock->acquiring_dev = NULL;
            bg_yield = false;
        } else {
            desired_dev = NULL;
            lock->acquiring_dev = NULL;
            bg_yield = true;
        }
    }
    *out_desired_dev = desired_dev;
    return bg_yield;
}
//Clear the LOCK bit and trigger a rescheduling.
IRAM_ATTR static inline void acquire_end_core(spi_bus_lock_dev_t *dev_handle)
{
    spi_bus_lock_t* lock = dev_handle->parent;
    spi_bus_lock_dev_t* desired_dev = NULL;

    //For this critical section, search `@note 1` in this file for details
    portENTER_CRITICAL_SAFE(&s_spinlock);
    uint32_t status = lock_status_clear(lock, dev_handle->mask & LOCK_MASK);
    bool invoke_bg = !schedule_core(lock, status, &desired_dev);
    portEXIT_CRITICAL_SAFE(&s_spinlock);

    if (invoke_bg) {
        bg_enable(lock);
    } else if (desired_dev) {
        resume_dev(desired_dev);
    } else if (status & WEAK_BG_FLAG) {
        bg_enable(lock);
    }
}
// Move the REQ bits to the corresponding PEND bits. Must be called by the acquiring processor.
// Has no side effects on the acquiring device/processor.
SPI_MASTER_ISR_ATTR static inline void update_pend_core(spi_bus_lock_t *lock, uint32_t status)
{
    uint32_t active_req_bits = status & REQ_MASK;
#if PENDING_SHIFT > REQ_SHIFT
    uint32_t pending_mask = active_req_bits << (PENDING_SHIFT - REQ_SHIFT);
#else
    uint32_t pending_mask = active_req_bits >> (REQ_SHIFT - PENDING_SHIFT);
#endif
    // We have to set the PEND bits and then clear the REQ bits. Since the BG bits use bitwise OR
    // logic, this will not affect the effectiveness of the BG bits of any device.
    lock_status_fetch_set(lock, pending_mask);
    lock_status_fetch_clear(lock, active_req_bits);
}
// Clear the PEND bit (not the REQ bit!) of a device; return a suggestion of whether we can try to quit the ISR.
// The acquiring-processor role is lost immediately when the BG bits of the acquiring device become inactive, as indicated by the return value.
// Can be called only when the ISR is acting as the acquiring processor.
SPI_MASTER_ISR_ATTR static inline bool clear_pend_core(spi_bus_lock_dev_t *dev_handle)
{
    bool finished;
    spi_bus_lock_t *lock = dev_handle->parent;
    uint32_t pend_mask = DEV_PEND_MASK(dev_handle);
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock_status_fetch(lock) & pend_mask);

    uint32_t status = lock_status_clear(lock, pend_mask);

    if (lock->acquiring_dev == dev_handle) {
        finished = ((status & DEV_REQ_MASK(dev_handle)) == 0);
        if (finished) {
            lock->acq_dev_bg_active = false;
        }
    } else {
        finished = (status == 0);
    }
    return finished;
}
// Return true if the ISR has already touched the HW, which means previous operations should
// be terminated first, before we use the HW again. Otherwise return false.
// In either case `in_isr` will be marked true, until `bg_exit_core` is successfully called with `wip=false`.
SPI_MASTER_ISR_ATTR static inline bool bg_entry_core(spi_bus_lock_t *lock)
{
    BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev || lock->acq_dev_bg_active);
    /*
     * The interrupt is disabled at the entry of the ISR to avoid the race below:
     *
     * `esp_intr_enable` will be called (b) after a new BG request is queued (a) in the task,
     * while `esp_intr_disable` should be called (c) if we check and find the sending queue
     * empty (d). If (c) happens after (d), and things happen in the sequence
     * (d) -> (a) -> (b) -> (c), the interrupt will be disabled while there is a pending BG
     * request in the queue.
     *
     * To avoid this, the interrupt is disabled here, and re-enabled later if required:
     * (c) -> (d) -> (a) -> (b) -> revert (c) if !d.
     */
    bg_disable(lock);
    if (lock->in_isr) {
        return false;
    } else {
        lock->in_isr = true;
        return true;
    }
}
// Handle the conditions of the status and the interrupt, avoiding the ISR being disabled when there are newly arrived BG requests.
// When called with `wip=true`, the ISR is performing some operation; the interrupt is re-enabled, and the function returns true unconditionally.
// When called with `wip=false`, it returns `true` only when there is no incoming BG request. If the return value is `false`, the ISR should try again.
// Will not change the acquiring device.
SPI_MASTER_ISR_ATTR static inline bool bg_exit_core(spi_bus_lock_t *lock, bool wip, BaseType_t *do_yield)
{
    //See the comments in `bg_entry_core`: re-enable the interrupt disabled at the entry, if we do need the interrupt
    if (wip) {
        bg_enable(lock);
        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev || lock->acq_dev_bg_active);
        return true;
    }

    bool ret;
    uint32_t status = lock_status_fetch(lock);
    if (lock->acquiring_dev) {
        if (status & DEV_BG_MASK(lock->acquiring_dev)) {
            BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->acq_dev_bg_active);
            ret = false;
        } else {
            // The request may happen at any time, even after we fetched the status.
            // The value of `acq_dev_bg_active` is unpredictable here.
            resume_dev_in_isr(lock->acquiring_dev, do_yield);
            ret = true;
        }
    } else {
        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
        ret = !(status & BG_MASK);
    }
    if (ret) {
        //on successful exit with no transaction done, mark the BG as inactive
        lock->in_isr = false;
    }
    return ret;
}
IRAM_ATTR static inline void dev_wait_prepare(spi_bus_lock_dev_t *dev_handle)
{
    // Take with zero timeout: drain a possible stale 'give' so that a following `dev_wait`
    // only wakes up on fresh notifications.
    xSemaphoreTake(dev_handle->semphr, 0);
}

SPI_MASTER_ISR_ATTR static inline esp_err_t dev_wait(spi_bus_lock_dev_t *dev_handle, TickType_t wait)
{
    BaseType_t ret = xSemaphoreTake(dev_handle->semphr, wait);
    if (ret == pdFALSE) {
        return ESP_ERR_TIMEOUT;
    }
    return ESP_OK;
}
/*******************************************************************************
 * Initialization & Deinitialization
 ******************************************************************************/
esp_err_t spi_bus_init_lock(spi_bus_lock_handle_t *out_lock, const spi_bus_lock_config_t *config)
{
    spi_bus_lock_t* lock = (spi_bus_lock_t*)calloc(sizeof(spi_bus_lock_t), 1);
    if (lock == NULL) {
        return ESP_ERR_NO_MEM;
    }

    lock_status_init(lock);
    lock->acquiring_dev = NULL;
    lock->last_dev = NULL;
    lock->periph_cs_num = config->cs_num;
    lock->host_id = config->host_id;

    *out_lock = lock;
    return ESP_OK;
}

void spi_bus_deinit_lock(spi_bus_lock_handle_t lock)
{
    for (int i = 0; i < DEV_NUM_MAX; i++) {
        assert(atomic_load(&lock->dev[i]) == (intptr_t)NULL);
    }
    free(lock);
}
static int try_acquire_free_dev(spi_bus_lock_t *lock, bool cs_required)
{
    if (cs_required) {
        int i;
        for (i = 0; i < lock->periph_cs_num; i++) {
            intptr_t null = (intptr_t) NULL;
            //use 1 to occupy the slot, actual setup comes later
            if (atomic_compare_exchange_strong(&lock->dev[i], &null, (intptr_t) 1)) {
                break;
            }
        }
        return ((i == lock->periph_cs_num) ? -1 : i);
    } else {
        int i;
        for (i = DEV_NUM_MAX - 1; i >= 0; i--) {
            intptr_t null = (intptr_t) NULL;
            //use 1 to occupy the slot, actual setup comes later
            if (atomic_compare_exchange_strong(&lock->dev[i], &null, (intptr_t) 1)) {
                break;
            }
        }
        return i;
    }
}
esp_err_t spi_bus_lock_register_dev(spi_bus_lock_handle_t lock, spi_bus_lock_dev_config_t *config,
                                    spi_bus_lock_dev_handle_t *out_dev_handle)
{
    if (lock == NULL) {
        return ESP_ERR_INVALID_ARG;
    }
    int id = try_acquire_free_dev(lock, config->flags & SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED);
    if (id == -1) {
        return ESP_ERR_NOT_SUPPORTED;
    }

    spi_bus_lock_dev_t* dev_lock = (spi_bus_lock_dev_t*)heap_caps_calloc(sizeof(spi_bus_lock_dev_t), 1, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    if (dev_lock == NULL) {
        //release the occupied slot before failing
        atomic_store(&lock->dev[id], (intptr_t)NULL);
        return ESP_ERR_NO_MEM;
    }
    dev_lock->semphr = xSemaphoreCreateBinary();
    if (dev_lock->semphr == NULL) {
        free(dev_lock);
        atomic_store(&lock->dev[id], (intptr_t)NULL);
        return ESP_ERR_NO_MEM;
    }
    dev_lock->parent = lock;
    dev_lock->mask = DEV_MASK(id);

    ESP_LOGV(TAG, "device registered on bus %d slot %d.", lock->host_id, id);
    atomic_store(&lock->dev[id], (intptr_t)dev_lock);
    *out_dev_handle = dev_lock;
    return ESP_OK;
}
void spi_bus_lock_unregister_dev(spi_bus_lock_dev_handle_t dev_handle)
{
    int id = dev_lock_get_id(dev_handle);
    spi_bus_lock_t* lock = dev_handle->parent;
    BUS_LOCK_DEBUG_EXECUTE_CHECK(atomic_load(&lock->dev[id]) == (intptr_t)dev_handle);

    if (lock->last_dev == dev_handle) {
        lock->last_dev = NULL;
    }

    atomic_store(&lock->dev[id], (intptr_t)NULL);
    if (dev_handle->semphr) {
        vSemaphoreDelete(dev_handle->semphr);
    }

    free(dev_handle);
}
IRAM_ATTR static inline int mask_get_id(uint32_t mask)
{
    return ID_DEV_MASK(mask);
}

IRAM_ATTR static inline int dev_lock_get_id(spi_bus_lock_dev_t *dev_lock)
{
    return mask_get_id(dev_lock->mask);
}

void spi_bus_lock_set_bg_control(spi_bus_lock_handle_t lock, bg_ctrl_func_t bg_enable, bg_ctrl_func_t bg_disable, void *arg)
{
    lock->bg_enable = bg_enable;
    lock->bg_disable = bg_disable;
    lock->bg_arg = arg;
}

IRAM_ATTR int spi_bus_lock_get_dev_id(spi_bus_lock_dev_handle_t dev_handle)
{
    return (dev_handle ? dev_lock_get_id(dev_handle) : -1);
}
//will be called when the cache is disabled
IRAM_ATTR bool spi_bus_lock_touch(spi_bus_lock_dev_handle_t dev_handle)
{
    spi_bus_lock_dev_t* last_dev = dev_handle->parent->last_dev;
    dev_handle->parent->last_dev = dev_handle;
    if (last_dev != dev_handle) {
        int last_dev_id = (last_dev ? dev_lock_get_id(last_dev) : -1);
        ESP_DRAM_LOGV(TAG, "SPI dev changed from %d to %d",
                      last_dev_id, dev_lock_get_id(dev_handle));
    }
    return (dev_handle != last_dev);
}
/*******************************************************************************
 * Acquiring service
 ******************************************************************************/
IRAM_ATTR esp_err_t spi_bus_lock_acquire_start(spi_bus_lock_dev_t *dev_handle, TickType_t wait)
{
    ESP_RETURN_ON_FALSE_ISR(wait == portMAX_DELAY, ESP_ERR_INVALID_ARG, TAG, "timeout other than portMAX_DELAY not supported");
    spi_bus_lock_t* lock = dev_handle->parent;

    // Clear the semaphore before checking
    dev_wait_prepare(dev_handle);
    if (!acquire_core(dev_handle)) {
        //block until becoming the acquiring processor (helped by the previous acquiring processor)
        esp_err_t err = dev_wait(dev_handle, wait);
        //TODO: add timeout handling here.
        if (err != ESP_OK) {
            return err;
        }
    }

    ESP_DRAM_LOGV(TAG, "dev %d acquired.", dev_lock_get_id(dev_handle));
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->acquiring_dev == dev_handle);

    //By the time we arrive here, the requests of this device should already have been handled
    uint32_t status = lock_status_fetch(lock);
    (void) status;
    BUS_LOCK_DEBUG_EXECUTE_CHECK((status & DEV_BG_MASK(dev_handle)) == 0);

    return ESP_OK;
}
IRAM_ATTR esp_err_t spi_bus_lock_acquire_end(spi_bus_lock_dev_t *dev_handle)
{
    //release the bus
    spi_bus_lock_t* lock = dev_handle->parent;
    ESP_RETURN_ON_FALSE_ISR(lock->acquiring_dev == dev_handle, ESP_ERR_INVALID_STATE, TAG, "Cannot release a lock that hasn't been acquired.");

    acquire_end_core(dev_handle);

    ESP_LOGV(TAG, "dev %d released.", dev_lock_get_id(dev_handle));
    return ESP_OK;
}

SPI_MASTER_ISR_ATTR spi_bus_lock_dev_handle_t spi_bus_lock_get_acquiring_dev(spi_bus_lock_t *lock)
{
    return lock->acquiring_dev;
}
/*******************************************************************************
 * BG (background operation) service
 ******************************************************************************/
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_entry(spi_bus_lock_t* lock)
{
    return bg_entry_core(lock);
}

SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_exit(spi_bus_lock_t* lock, bool wip, BaseType_t* do_yield)
{
    return bg_exit_core(lock, wip, do_yield);
}

SPI_MASTER_ATTR esp_err_t spi_bus_lock_bg_request(spi_bus_lock_dev_t *dev_handle)
{
    req_core(dev_handle);
    return ESP_OK;
}
IRAM_ATTR esp_err_t spi_bus_lock_wait_bg_done(spi_bus_lock_dev_handle_t dev_handle, TickType_t wait)
{
    spi_bus_lock_t *lock = dev_handle->parent;
    ESP_RETURN_ON_FALSE_ISR(lock->acquiring_dev == dev_handle, ESP_ERR_INVALID_STATE, TAG, "Cannot wait for a device that is not acquired");
    ESP_RETURN_ON_FALSE_ISR(wait == portMAX_DELAY, ESP_ERR_INVALID_ARG, TAG, "timeout other than portMAX_DELAY not supported");

    // If no BG bits are active, skip quickly. This is ensured by the fact that
    // `spi_bus_lock_wait_bg_done` cannot be executed concurrently with `bg_request` on the same device.
    if (lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) {
        // Clear the semaphore before checking
        dev_wait_prepare(dev_handle);
        if (lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) {
            //block until becoming the acquiring processor (helped by the previous acquiring processor)
            esp_err_t err = dev_wait(dev_handle, wait);
            //TODO: add timeout handling here.
            if (err != ESP_OK) {
                return err;
            }
        }
    }

    BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
    BUS_LOCK_DEBUG_EXECUTE_CHECK((lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) == 0);
    return ESP_OK;
}
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_clear_req(spi_bus_lock_dev_t *dev_handle)
{
    bool finished = clear_pend_core(dev_handle);
    ESP_EARLY_LOGV(TAG, "dev %d served from bg.", dev_lock_get_id(dev_handle));
    return finished;
}

SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_check_dev_acq(spi_bus_lock_t *lock,
                                                       spi_bus_lock_dev_handle_t *out_dev_lock)
{
    BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev);
    uint32_t status = lock_status_fetch(lock);
    return schedule_core(lock, status, out_dev_lock);
}

SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_check_dev_req(spi_bus_lock_dev_t *dev_lock)
{
    spi_bus_lock_t* lock = dev_lock->parent;
    uint32_t status = lock_status_fetch(lock);
    uint32_t dev_status = status & dev_lock->mask;

    // Move the REQ bits of all devices to the corresponding PEND bits.
    // To reduce execution time, this is only done when the REQ bit of the calling device is set.
    if (dev_status & REQ_MASK) {
        update_pend_core(lock, status);
        return true;
    } else {
        return dev_status & PEND_MASK;
    }
}

SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_req_exist(spi_bus_lock_t *lock)
{
    uint32_t status = lock_status_fetch(lock);
    return status & BG_MASK;
}
/*******************************************************************************
 * Static variables of the locks of the main flash
 ******************************************************************************/
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
static spi_bus_lock_dev_t lock_main_flash_dev;

static spi_bus_lock_t main_spi_bus_lock = {
    /*
     * The main bus cache is permanently required. This flag is set here and never cleared, so
     * that the cache will always be re-enabled when the acquiring devices yield.
     */
    .status = ATOMIC_VAR_INIT(WEAK_BG_FLAG),
    .acquiring_dev = NULL,
    .dev = {ATOMIC_VAR_INIT((intptr_t)&lock_main_flash_dev)},
    .new_req = 0,
    .periph_cs_num = SOC_SPI_PERIPH_CS_NUM(0),
};
const spi_bus_lock_handle_t g_main_spi_bus_lock = &main_spi_bus_lock;

esp_err_t spi_bus_lock_init_main_bus(void)
{
    spi_bus_main_set_lock(g_main_spi_bus_lock);
    return ESP_OK;
}

static StaticSemaphore_t main_flash_semphr;

static spi_bus_lock_dev_t lock_main_flash_dev = {
    .semphr = NULL,
    .parent = &main_spi_bus_lock,
    .mask = DEV_MASK(0),
};
const spi_bus_lock_dev_handle_t g_spi_lock_main_flash_dev = &lock_main_flash_dev;

esp_err_t spi_bus_lock_init_main_dev(void)
{
    g_spi_lock_main_flash_dev->semphr = xSemaphoreCreateBinaryStatic(&main_flash_semphr);
    if (g_spi_lock_main_flash_dev->semphr == NULL) {
        return ESP_ERR_NO_MEM;
    }
    return ESP_OK;
}
#else //CONFIG_SPI_FLASH_SHARE_SPI1_BUS
//when the dev lock is not initialized, point to NULL
const spi_bus_lock_dev_handle_t g_spi_lock_main_flash_dev = NULL;
#endif