spinlock.h
/*
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

#include "sdkconfig.h"
#include <stdint.h>
#include <stdbool.h>
#include <assert.h> // assert() is used below
#include "esp_cpu.h"
#if __XTENSA__
#include "xtensa/xtruntime.h"
#include "xt_utils.h"
#else
#include "riscv/rv_utils.h"
#endif
//TODO: IDF-7771, P4, see JIRA to know what changed and what needs to be checked
#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_SPIRAM_WORKAROUND_NEED_VOLATILE_SPINLOCK
#define NEED_VOLATILE_MUX volatile
#else
#define NEED_VOLATILE_MUX
#endif

#define SPINLOCK_FREE           0xB33FFFFF
#define SPINLOCK_WAIT_FOREVER   (-1)
#define SPINLOCK_NO_WAIT        0
#define SPINLOCK_INITIALIZER    {.owner = SPINLOCK_FREE, .count = 0}

// XORing a raw Xtensa core ID register value with this constant maps
// CORE_ID_REGVAL_PRO (0xCDCD) to CORE_ID_REGVAL_APP (0xABAB) and vice versa
#define CORE_ID_REGVAL_XOR_SWAP (0xCDCD ^ 0xABAB)

typedef struct {
    NEED_VOLATILE_MUX uint32_t owner;
    NEED_VOLATILE_MUX uint32_t count;
} spinlock_t;
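
/*
 * Illustrative usage sketch (not part of the original header): a statically
 * allocated lock can be initialized at compile time with SPINLOCK_INITIALIZER.
 * The names s_lock and s_shared_counter are hypothetical.
 *
 *     static spinlock_t s_lock = SPINLOCK_INITIALIZER;
 *     static uint32_t s_shared_counter = 0;
 */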
/**
 * @brief Initialize a lock to its default state - unlocked
 * @param lock - spinlock object to initialize
 */
static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t *lock)
{
    assert(lock);
#if !CONFIG_FREERTOS_UNICORE
    lock->owner = SPINLOCK_FREE;
    lock->count = 0;
#endif
}
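
/*
 * Illustrative usage sketch (assumption, not part of the original header):
 * spinlock_initialize() covers locks whose storage is not statically
 * initialized, e.g. a lock embedded in a heap-allocated structure. The struct
 * and field names below are hypothetical.
 *
 *     typedef struct {
 *         spinlock_t lock;
 *         uint32_t value;
 *     } shared_state_t;
 *
 *     void shared_state_init(shared_state_t *state)
 *     {
 *         spinlock_initialize(&state->lock);
 *         state->value = 0;
 *     }
 */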
/**
 * @brief Top level spinlock acquire function, spins until it gets the lock
 *
 * This function will:
 * - Save current interrupt state, then disable interrupts
 * - Spin until lock is acquired or until timeout occurs
 * - Restore interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 * function reenables interrupts once the spinlock is acquired). For critical
 * sections, use the interface provided by the operating system.
 * @param lock - target spinlock object
 * @param timeout - cycles to wait, passing SPINLOCK_WAIT_FOREVER blocks indefinitely
 * @return true if the lock was acquired, false if the timeout expired
 */
static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    uint32_t irq_status;
    uint32_t core_id, other_core_id;
    bool lock_set;
    esp_cpu_cycle_count_t start_count;

    assert(lock);

#if __XTENSA__
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);

    // Note: The core IDs are the full 32-bit (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) values
    core_id = xt_utils_get_raw_core_id();
    other_core_id = CORE_ID_REGVAL_XOR_SWAP ^ core_id;
#else //__riscv
    irq_status = rv_utils_set_intlevel(RVHAL_EXCM_LEVEL);
    core_id = rv_utils_get_core_id();
    other_core_id = 1 - core_id;
#endif

    /* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO,
     * CORE_ID_REGVAL_APP:
     *  - If SPINLOCK_FREE, we want to atomically set to 'core_id'.
     *  - If "our" core_id, we can drop through immediately.
     *  - If "other_core_id", we spin here.
     */

    // The caller is already the owner of the lock. Simply increment the nesting count
    if (lock->owner == core_id) {
        assert(lock->count > 0 && lock->count < 0xFF);  // Bad count value implies memory corruption
        lock->count++;
#if __XTENSA__
        XTOS_RESTORE_INTLEVEL(irq_status);
#else
        rv_utils_restore_intlevel(irq_status);
#endif
        return true;
    }

    /* First attempt to take the lock.
     *
     * Note: We do a first attempt separately (instead of putting this into a loop) to avoid a call to
     * esp_cpu_get_cycle_count(). Doing the first attempt separately makes acquiring a free lock quicker, which
     * is the case for the majority of spinlock_acquire() calls (as spinlocks are free most of the time since they
     * aren't meant to be held for long).
     */
    lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_id);
    if (lock_set || timeout == SPINLOCK_NO_WAIT) {
        // We've successfully taken the lock, or we are not retrying
        goto exit;
    }

    // First attempt to take the lock has failed. Retry until the lock is taken, or until we timeout.
    start_count = esp_cpu_get_cycle_count();
    do {
        lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_id);
        if (lock_set) {
            break;
        }
        // Keep looping if we are waiting forever, or check if we have timed out
    } while ((timeout == SPINLOCK_WAIT_FOREVER) || (esp_cpu_get_cycle_count() - start_count) <= timeout);

exit:
    if (lock_set) {
        assert(lock->owner == core_id);
        assert(lock->count == 0);   // This is the first time the lock is set, so count should still be 0
        lock->count++;              // Finally, we increment the lock count
    } else {                        // We timed out waiting for lock
        assert(lock->owner == SPINLOCK_FREE || lock->owner == other_core_id);
        assert(lock->count < 0xFF); // Bad count value implies memory corruption
    }

#if __XTENSA__
    XTOS_RESTORE_INTLEVEL(irq_status);
#else
    rv_utils_restore_intlevel(irq_status);
#endif
    return lock_set;

#else  // !CONFIG_FREERTOS_UNICORE
    return true;
#endif
}
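
/*
 * Illustrative usage sketch (assumption, not part of the original header):
 * because spinlock_acquire() returns false on timeout, it can serve as a
 * non-blocking "try lock" when called with SPINLOCK_NO_WAIT, or with a bounded
 * wait given in CPU cycles. The names s_stats_lock, s_event_count and
 * try_count_event() are hypothetical.
 *
 *     static spinlock_t s_stats_lock = SPINLOCK_INITIALIZER;
 *     static uint32_t s_event_count = 0;
 *
 *     bool try_count_event(void)
 *     {
 *         // Give up immediately if the other core currently holds the lock
 *         if (!spinlock_acquire(&s_stats_lock, SPINLOCK_NO_WAIT)) {
 *             return false;
 *         }
 *         s_event_count++;
 *         spinlock_release(&s_stats_lock);
 *         return true;
 *     }
 */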
/**
 * @brief Top level spinlock unlock function, unlocks a previously locked spinlock
 *
 * This function will:
 * - Save current interrupt state, then disable interrupts
 * - Release the spinlock
 * - Restore interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 * function reenables interrupts once the spinlock is released). For critical
 * sections, use the interface provided by the operating system.
 * @param lock - target spinlock object, previously locked via spinlock_acquire()
 */
static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *lock)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    uint32_t irq_status;
    uint32_t core_id;

    assert(lock);

#if __XTENSA__
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
    core_id = xt_utils_get_raw_core_id();
#else
    irq_status = rv_utils_set_intlevel(RVHAL_EXCM_LEVEL);
    core_id = rv_utils_get_core_id();
#endif

    assert(core_id == lock->owner); // This is a lock that we didn't acquire, or the lock is corrupt
    lock->count--;

    if (!lock->count) {             // If this is the last recursive release of the lock, mark the lock as free
        lock->owner = SPINLOCK_FREE;
    } else {
        assert(lock->count < 0x100); // Indicates memory corruption
    }

#if __XTENSA__
    XTOS_RESTORE_INTLEVEL(irq_status);
#else
    rv_utils_restore_intlevel(irq_status);
#endif //#if __XTENSA__
#endif //#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
}
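
/*
 * Illustrative usage sketch (assumption, not part of the original header):
 * acquire/release calls on the same core may nest; the lock is only marked
 * SPINLOCK_FREE once the count drops back to zero. The names below are
 * hypothetical.
 *
 *     static spinlock_t s_list_lock = SPINLOCK_INITIALIZER;
 *     static uint32_t s_list_len = 0;
 *
 *     static void list_clear(void)
 *     {
 *         spinlock_acquire(&s_list_lock, SPINLOCK_WAIT_FOREVER); // nests if the caller already holds the lock
 *         s_list_len = 0;
 *         spinlock_release(&s_list_lock);
 *     }
 *
 *     void list_reset(void)
 *     {
 *         spinlock_acquire(&s_list_lock, SPINLOCK_WAIT_FOREVER); // count = 1
 *         list_clear();                                          // nested acquire, count = 2 then back to 1
 *         spinlock_release(&s_list_lock);                        // count drops to 0, lock freed
 *     }
 */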

#ifdef __cplusplus
}
#endif