  1. /*
  2. * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
#pragma once

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include "sdkconfig.h"
#include "hal/cpu_hal.h"
#include "compare_set.h"
#include "soc/soc.h"
#if __XTENSA__
#include "xtensa/xtruntime.h"
#endif
  16. #ifdef __cplusplus
  17. extern "C" {
  18. #endif
/* Some SPIRAM-cache workaround configurations require the spinlock fields to be
 * volatile so accesses are not cached/reordered by the compiler. Otherwise the
 * qualifier expands to nothing. */
#ifdef CONFIG_SPIRAM_WORKAROUND_NEED_VOLATILE_SPINLOCK
#define NEED_VOLATILE_MUX volatile
#else
#define NEED_VOLATILE_MUX
#endif

#define SPINLOCK_FREE          0xB33FFFFF   /* Magic "unowned" owner value; also serves as a corruption canary */
#define SPINLOCK_WAIT_FOREVER  (-1)         /* Timeout value: spin indefinitely */
#define SPINLOCK_NO_WAIT       0            /* Timeout value: single attempt, no spinning */
#define SPINLOCK_INITIALIZER   {.owner = SPINLOCK_FREE,.count = 0}
/* XOR-ing a core's PRID register value with this constant yields the other
 * core's PRID value (0xCDCD <-> 0xABAB), used to detect contention. */
#define CORE_ID_REGVAL_XOR_SWAP (0xCDCD ^ 0xABAB)

typedef struct {
    NEED_VOLATILE_MUX uint32_t owner;   /* SPINLOCK_FREE, or the PRID value of the owning core */
    NEED_VOLATILE_MUX uint32_t count;   /* Recursion depth: number of times the owner has acquired the lock */
}spinlock_t;
  33. #if (CONFIG_SPIRAM)
  34. /**
  35. * @brief Check if the pointer is on external ram
  36. * @param p pointer
  37. * @return true: on external ram; false: not on external ram
  38. */
  39. static inline bool __attribute__((always_inline)) spinlock_ptr_external_ram(const void *p)
  40. {
  41. //On esp32, this external virtual address rergion is for psram
  42. return ((intptr_t)p >= SOC_EXTRAM_DATA_LOW && (intptr_t)p < SOC_EXTRAM_DATA_HIGH);
  43. }
  44. #endif
  45. /**
  46. * @brief Initialize a lock to its default state - unlocked
  47. * @param lock - spinlock object to initialize
  48. */
  49. static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t *lock)
  50. {
  51. assert(lock);
  52. #if !CONFIG_FREERTOS_UNICORE
  53. lock->owner = SPINLOCK_FREE;
  54. lock->count = 0;
  55. #endif
  56. }
  57. /**
  58. * @brief Top level spinlock acquire function, spins until get the lock
  59. *
  60. * This function will:
  61. * - Save current interrupt state, then disable interrupts
  62. * - Spin until lock is acquired or until timeout occurs
  63. * - Restore interrupt state
  64. *
  65. * @note Spinlocks alone do no constitute true critical sections (as this
  66. * function reenables interrupts once the spinlock is acquired). For critical
  67. * sections, use the interface provided by the operating system.
  68. * @param lock - target spinlock object
  69. * @param timeout - cycles to wait, passing SPINLOCK_WAIT_FOREVER blocs indefinitely
  70. */
  71. static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
  72. {
  73. #if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
  74. uint32_t result;
  75. uint32_t irq_status;
  76. uint32_t ccount_start;
  77. uint32_t core_id, other_core_id;
  78. assert(lock);
  79. irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
  80. if(timeout != SPINLOCK_WAIT_FOREVER){
  81. RSR(CCOUNT, ccount_start);
  82. }
  83. /*spin until we own a core */
  84. RSR(PRID, core_id);
  85. /* Note: coreID is the full 32 bit core ID (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) */
  86. other_core_id = CORE_ID_REGVAL_XOR_SWAP ^ core_id;
  87. do {
  88. /* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO,
  89. * CORE_ID_REGVAL_APP:
  90. * - If SPINLOCK_FREE, we want to atomically set to 'core_id'.
  91. * - If "our" core_id, we can drop through immediately.
  92. * - If "other_core_id", we spin here.
  93. */
  94. result = core_id;
  95. #if (CONFIG_SPIRAM)
  96. if (spinlock_ptr_external_ram(lock)) {
  97. compare_and_set_extram(&lock->owner, SPINLOCK_FREE, &result);
  98. } else {
  99. #endif
  100. compare_and_set_native(&lock->owner, SPINLOCK_FREE, &result);
  101. #if (CONFIG_SPIRAM)
  102. }
  103. #endif
  104. if(result != other_core_id) {
  105. break;
  106. }
  107. if (timeout != SPINLOCK_WAIT_FOREVER) {
  108. uint32_t ccount_now;
  109. ccount_now = cpu_hal_get_cycle_count();
  110. if (ccount_now - ccount_start > (unsigned)timeout) {
  111. XTOS_RESTORE_INTLEVEL(irq_status);
  112. return false;
  113. }
  114. }
  115. }while(1);
  116. /* any other value implies memory corruption or uninitialized mux */
  117. assert(result == core_id || result == SPINLOCK_FREE);
  118. assert((result == SPINLOCK_FREE) == (lock->count == 0)); /* we're first to lock iff count is zero */
  119. assert(lock->count < 0xFF); /* Bad count value implies memory corruption */
  120. lock->count++;
  121. XTOS_RESTORE_INTLEVEL(irq_status);
  122. return true;
  123. #else // !CONFIG_FREERTOS_UNICORE
  124. return true;
  125. #endif
  126. }
  127. /**
  128. * @brief Top level spinlock unlock function, unlocks a previously locked spinlock
  129. *
  130. * This function will:
  131. * - Save current interrupt state, then disable interrupts
  132. * - Release the spinlock
  133. * - Restore interrupt state
  134. *
  135. * @note Spinlocks alone do no constitute true critical sections (as this
  136. * function reenables interrupts once the spinlock is acquired). For critical
  137. * sections, use the interface provided by the operating system.
  138. * @param lock - target, locked before, spinlock object
  139. */
  140. static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *lock)
  141. {
  142. #if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
  143. uint32_t irq_status;
  144. uint32_t core_id;
  145. assert(lock);
  146. irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
  147. RSR(PRID, core_id);
  148. assert(core_id == lock->owner); // This is a mutex we didn't lock, or it's corrupt
  149. lock->count--;
  150. if(!lock->count) {
  151. lock->owner = SPINLOCK_FREE;
  152. } else {
  153. assert(lock->count < 0x100); // Indicates memory corruption
  154. }
  155. XTOS_RESTORE_INTLEVEL(irq_status);
  156. #endif
  157. }
  158. #ifdef __cplusplus
  159. }
  160. #endif