spinlock.h

/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#pragma once

#include "sdkconfig.h"
#include <assert.h>
#include <stdint.h>
#include <stdbool.h>
#include "esp_cpu.h"
#if __XTENSA__
#include "xtensa/xtruntime.h"
#include "xt_utils.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_SPIRAM_WORKAROUND_NEED_VOLATILE_SPINLOCK
#define NEED_VOLATILE_MUX volatile
#else
#define NEED_VOLATILE_MUX
#endif

#define SPINLOCK_FREE           0xB33FFFFF
#define SPINLOCK_WAIT_FOREVER   (-1)
#define SPINLOCK_NO_WAIT        0
#define SPINLOCK_INITIALIZER    {.owner = SPINLOCK_FREE, .count = 0}
#define CORE_ID_REGVAL_XOR_SWAP (0xCDCD ^ 0xABAB)
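
/*
 * A worked illustration of the XOR-swap constant above, assuming the raw core
 * ID register values named in this file (CORE_ID_REGVAL_PRO = 0xCDCD and
 * CORE_ID_REGVAL_APP = 0xABAB): XOR-ing either core's ID with the constant
 * yields the other core's ID, e.g. 0xCDCD ^ (0xCDCD ^ 0xABAB) == 0xABAB.
 * This lets spinlock_acquire() compute 'other_core_id' without a branch.
 */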

typedef struct {
    NEED_VOLATILE_MUX uint32_t owner;
    NEED_VOLATILE_MUX uint32_t count;
} spinlock_t;

/**
 * @brief Initialize a lock to its default state - unlocked
 * @param lock - spinlock object to initialize
 */
static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t *lock)
{
    assert(lock);
#if !CONFIG_FREERTOS_UNICORE
    lock->owner = SPINLOCK_FREE;
    lock->count = 0;
#endif
}
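
/*
 * A minimal usage sketch ('s_lock' and 's_other_lock' are hypothetical names).
 * A spinlock can be initialized statically with SPINLOCK_INITIALIZER, or at
 * runtime with spinlock_initialize():
 *
 *     static spinlock_t s_lock = SPINLOCK_INITIALIZER;
 *
 *     static spinlock_t s_other_lock;
 *
 *     void setup(void)
 *     {
 *         spinlock_initialize(&s_other_lock); // owner = SPINLOCK_FREE, count = 0
 *     }
 */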

/**
 * @brief Top level spinlock acquire function, spins until the lock is acquired or the timeout expires
 *
 * This function will:
 * - Save current interrupt state, then disable interrupts
 * - Spin until lock is acquired or until timeout occurs
 * - Restore interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 * function re-enables interrupts once the spinlock is acquired). For critical
 * sections, use the interface provided by the operating system.
 * @param lock - target spinlock object
 * @param timeout - cycles to wait, passing SPINLOCK_WAIT_FOREVER blocks indefinitely
 * @return true if the lock was acquired, false if it timed out
 */
static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    uint32_t irq_status;
    uint32_t core_id, other_core_id;
    bool lock_set;
    esp_cpu_cycle_count_t start_count;

    assert(lock);
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);

    // Note: The core IDs are the full 32-bit (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) values
    core_id = xt_utils_get_raw_core_id();
    other_core_id = CORE_ID_REGVAL_XOR_SWAP ^ core_id;

    /* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO,
     * CORE_ID_REGVAL_APP:
     *  - If SPINLOCK_FREE, we want to atomically set to 'core_id'.
     *  - If "our" core_id, we can drop through immediately.
     *  - If "other_core_id", we spin here.
     */

    // The caller is already the owner of the lock. Simply increment the nesting count
    if (lock->owner == core_id) {
        assert(lock->count > 0 && lock->count < 0xFF);  // Bad count value implies memory corruption
        lock->count++;
        XTOS_RESTORE_INTLEVEL(irq_status);
        return true;
    }

    /* First attempt to take the lock.
     *
     * Note: We do a first attempt separately (instead of putting this into a loop) in order to avoid a call to
     * esp_cpu_get_cycle_count(). Doing a first attempt separately makes acquiring a free lock quicker, which
     * is the case for the majority of spinlock_acquire() calls (as spinlocks are free most of the time since they
     * aren't meant to be held for long).
     */
    lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_id);
    if (lock_set || timeout == SPINLOCK_NO_WAIT) {
        // We've successfully taken the lock, or we are not retrying
        goto exit;
    }

    // First attempt to take the lock has failed. Retry until the lock is taken, or until we timeout.
    start_count = esp_cpu_get_cycle_count();
    do {
        lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_id);
        if (lock_set) {
            break;
        }
        // Keep looping if we are waiting forever, or check if we have timed out
    } while ((timeout == SPINLOCK_WAIT_FOREVER) || (esp_cpu_get_cycle_count() - start_count) <= (esp_cpu_cycle_count_t)timeout);

exit:
    if (lock_set) {
        assert(lock->owner == core_id);
        assert(lock->count == 0);   // This is the first time the lock is set, so count should still be 0
        lock->count++;              // Finally, we increment the lock count
    } else {                        // We timed out waiting for lock
        assert(lock->owner == SPINLOCK_FREE || lock->owner == other_core_id);
        assert(lock->count < 0xFF); // Bad count value implies memory corruption
    }

    XTOS_RESTORE_INTLEVEL(irq_status);
    return lock_set;
#else   // CONFIG_FREERTOS_UNICORE || BOOTLOADER_BUILD
    return true;
#endif
}
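
/*
 * A usage sketch for the timeout variants ('s_lock' is a hypothetical name).
 * The timeout is measured in CPU cycles (via esp_cpu_get_cycle_count()), not
 * in ticks or microseconds:
 *
 *     static spinlock_t s_lock = SPINLOCK_INITIALIZER;
 *
 *     void access_shared_resource(void)
 *     {
 *         // Spin for at most 1000 CPU cycles; SPINLOCK_WAIT_FOREVER would
 *         // block indefinitely, SPINLOCK_NO_WAIT makes a single attempt.
 *         if (spinlock_acquire(&s_lock, 1000)) {
 *             // ... access the shared resource ...
 *             spinlock_release(&s_lock);
 *         }
 *     }
 */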

/**
 * @brief Top level spinlock unlock function, unlocks a previously locked spinlock
 *
 * This function will:
 * - Save current interrupt state, then disable interrupts
 * - Release the spinlock
 * - Restore interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 * function re-enables interrupts once the spinlock is released). For critical
 * sections, use the interface provided by the operating system.
 * @param lock - target spinlock object, which must currently be held by the calling core
 */
static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *lock)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    uint32_t irq_status;
    uint32_t core_id;

    assert(lock);
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);

    core_id = xt_utils_get_raw_core_id();
    assert(core_id == lock->owner); // This is a lock that we didn't acquire, or the lock is corrupt
    lock->count--;

    if (!lock->count) {             // If this is the last recursive release of the lock, mark the lock as free
        lock->owner = SPINLOCK_FREE;
    } else {
        assert(lock->count < 0x100); // Indicates memory corruption
    }

    XTOS_RESTORE_INTLEVEL(irq_status);
#endif
}
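
/*
 * A sketch of the recursive locking behavior implied by the 'count' field
 * ('s_lock' is a hypothetical name). The owning core may re-acquire a lock it
 * already holds; the lock only becomes free again once every acquire has been
 * matched by a release:
 *
 *     static spinlock_t s_lock = SPINLOCK_INITIALIZER;
 *
 *     void nested(void)
 *     {
 *         spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER); // count = 1
 *         spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER); // same owner: count = 2
 *         spinlock_release(&s_lock);                        // count = 1, still held
 *         spinlock_release(&s_lock);                        // count = 0, owner = SPINLOCK_FREE
 *     }
 */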

#ifdef __cplusplus
}
#endif