spinlock.h

/*
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#pragma once

#include "sdkconfig.h"
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include "esp_cpu.h"
#if __XTENSA__
#include "xtensa/xtruntime.h"
#include "xt_utils.h"
#else
#include "riscv/rv_utils.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_SPIRAM_WORKAROUND_NEED_VOLATILE_SPINLOCK
#define NEED_VOLATILE_MUX volatile
#else
#define NEED_VOLATILE_MUX
#endif

#define SPINLOCK_FREE               0xB33FFFFF
#define SPINLOCK_WAIT_FOREVER       (-1)
#define SPINLOCK_NO_WAIT            0
#define SPINLOCK_INITIALIZER        {.owner = SPINLOCK_FREE, .count = 0}
#define SPINLOCK_OWNER_ID_0         0xCDCD /* Use these values to avoid 0 being a valid lock owner, same as CORE_ID_REGVAL_PRO on Xtensa */
#define SPINLOCK_OWNER_ID_1         0xABAB /* Same as CORE_ID_REGVAL_APP on Xtensa */

#define CORE_ID_REGVAL_XOR_SWAP     (0xCDCD ^ 0xABAB)
#define SPINLOCK_OWNER_ID_XOR_SWAP  CORE_ID_REGVAL_XOR_SWAP

typedef struct {
    NEED_VOLATILE_MUX uint32_t owner;
    NEED_VOLATILE_MUX uint32_t count;
} spinlock_t;
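
/*
 * Illustrative usage (a minimal sketch, not part of the original header;
 * `s_lock` is a hypothetical name): a spinlock with static storage duration
 * can be initialized at compile time with SPINLOCK_INITIALIZER.
 *
 *     static spinlock_t s_lock = SPINLOCK_INITIALIZER; // owner = SPINLOCK_FREE, count = 0
 */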
/**
 * @brief Initialize a lock to its default state - unlocked
 * @param lock - spinlock object to initialize
 */
static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t *lock)
{
    assert(lock);
#if !CONFIG_FREERTOS_UNICORE
    lock->owner = SPINLOCK_FREE;
    lock->count = 0;
#endif
}
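
/*
 * Illustrative usage (a minimal sketch; `my_ctx_t` and its fields are
 * hypothetical): a lock embedded in a dynamically allocated structure must
 * be initialized at runtime before first use.
 *
 *     my_ctx_t *ctx = malloc(sizeof(my_ctx_t));
 *     assert(ctx);
 *     spinlock_initialize(&ctx->lock); // lock starts out unlocked
 */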
/**
 * @brief Top level spinlock acquire function, spins until the lock is acquired
 *
 * This function will:
 * - Save current interrupt state, then disable interrupts
 * - Spin until lock is acquired or until timeout occurs
 * - Restore interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 * function reenables interrupts once the spinlock is acquired). For critical
 * sections, use the interface provided by the operating system.
 * @param lock - target spinlock object
 * @param timeout - cycles to wait, passing SPINLOCK_WAIT_FOREVER blocks indefinitely
 * @return true if the lock was acquired, false if the timeout expired first
 */
static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    uint32_t irq_status;
    uint32_t core_owner_id, other_core_owner_id;
    bool lock_set;
    esp_cpu_cycle_count_t start_count;

    assert(lock);
#if __XTENSA__
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
    // Note: The core IDs are the full 32-bit (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) values
    core_owner_id = xt_utils_get_raw_core_id();
#else //__riscv
    irq_status = rv_utils_mask_int_level_lower_than(RVHAL_EXCM_LEVEL);
    core_owner_id = rv_utils_get_core_id() == 0 ? SPINLOCK_OWNER_ID_0 : SPINLOCK_OWNER_ID_1;
#endif
    other_core_owner_id = CORE_ID_REGVAL_XOR_SWAP ^ core_owner_id;

    /* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO, or
     * CORE_ID_REGVAL_APP:
     * - If SPINLOCK_FREE, we want to atomically set it to 'core_owner_id'.
     * - If it is "our" core_owner_id, we can drop through immediately.
     * - If it is "other_core_owner_id", we spin here.
     */

    // The caller is already the owner of the lock. Simply increment the nesting count
    if (lock->owner == core_owner_id) {
        assert(lock->count > 0 && lock->count < 0xFF); // Bad count value implies memory corruption
        lock->count++;
#if __XTENSA__
        XTOS_RESTORE_INTLEVEL(irq_status);
#else
        rv_utils_restore_intlevel(irq_status);
#endif
        return true;
    }

    /* First attempt to take the lock.
     *
     * Note: We make the first attempt separately (instead of folding it into the loop) in order to avoid the call
     * to esp_cpu_get_cycle_count(). Doing a first attempt separately makes acquiring a free lock quicker, which
     * is the case for the majority of spinlock_acquire() calls (as spinlocks are free most of the time since they
     * aren't meant to be held for long).
     */
    lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_owner_id);
    if (lock_set || timeout == SPINLOCK_NO_WAIT) {
        // We've successfully taken the lock, or we are not retrying
        goto exit;
    }

    // First attempt to take the lock has failed. Retry until the lock is taken, or until we timeout.
    start_count = esp_cpu_get_cycle_count();
    do {
        lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_owner_id);
        if (lock_set) {
            break;
        }
        // Keep looping if we are waiting forever, or check if we have timed out
    } while ((timeout == SPINLOCK_WAIT_FOREVER) || (esp_cpu_get_cycle_count() - start_count) <= (esp_cpu_cycle_count_t)timeout);

exit:
    if (lock_set) {
        assert(lock->owner == core_owner_id);
        assert(lock->count == 0); // This is the first time the lock is set, so count should still be 0
        lock->count++; // Finally, we increment the lock count
    } else { // We timed out waiting for lock
        assert(lock->owner == SPINLOCK_FREE || lock->owner == other_core_owner_id);
        assert(lock->count < 0xFF); // Bad count value implies memory corruption
    }

#if __XTENSA__
    XTOS_RESTORE_INTLEVEL(irq_status);
#else
    rv_utils_restore_intlevel(irq_status);
#endif
    return lock_set;

#else // !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    return true;
#endif
}
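
/*
 * Illustrative usage (a minimal sketch; `s_lock`, `s_shared_counter`, and the
 * cycle budget are hypothetical): a bounded acquire that falls back instead
 * of spinning forever when the other core holds the lock.
 *
 *     static spinlock_t s_lock = SPINLOCK_INITIALIZER;
 *     static uint32_t s_shared_counter;
 *
 *     bool try_bump_counter(void)
 *     {
 *         if (!spinlock_acquire(&s_lock, 10000)) { // spin for at most ~10000 CPU cycles
 *             return false; // timed out; the other core still holds the lock
 *         }
 *         s_shared_counter++;
 *         spinlock_release(&s_lock);
 *         return true;
 *     }
 */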
/**
 * @brief Top level spinlock unlock function, unlocks a previously locked spinlock
 *
 * This function will:
 * - Save current interrupt state, then disable interrupts
 * - Release the spinlock
 * - Restore interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 * function reenables interrupts once the spinlock is released). For critical
 * sections, use the interface provided by the operating system.
 * @param lock - target spinlock object, which must currently be held by the calling core
 */
static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *lock)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    uint32_t irq_status;
    uint32_t core_owner_id;

    assert(lock);
#if __XTENSA__
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
    core_owner_id = xt_utils_get_raw_core_id();
#else
    irq_status = rv_utils_mask_int_level_lower_than(RVHAL_EXCM_LEVEL);
    core_owner_id = rv_utils_get_core_id() == 0 ? SPINLOCK_OWNER_ID_0 : SPINLOCK_OWNER_ID_1;
#endif
    assert(core_owner_id == lock->owner); // This is a lock that we didn't acquire, or the lock is corrupt
    lock->count--;

    if (!lock->count) { // If this is the last recursive release of the lock, mark the lock as free
        lock->owner = SPINLOCK_FREE;
    } else {
        assert(lock->count < 0x100); // Indicates memory corruption
    }

#if __XTENSA__
    XTOS_RESTORE_INTLEVEL(irq_status);
#else
    rv_utils_restore_intlevel(irq_status);
#endif //#if __XTENSA__
#endif //#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
}
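
/*
 * Illustrative note (a minimal sketch; `s_lock` is hypothetical): acquisitions
 * by the owning core nest, so every successful spinlock_acquire() must be
 * balanced by exactly one spinlock_release(); the lock only returns to
 * SPINLOCK_FREE once the nesting count drops back to zero.
 *
 *     spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER); // count = 1, owner = this core
 *     spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER); // same core re-enters: count = 2
 *     spinlock_release(&s_lock);                        // count = 1, still owned
 *     spinlock_release(&s_lock);                        // count = 0, owner = SPINLOCK_FREE
 */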
#ifdef __cplusplus
}
#endif