spinlock.h

/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#pragma once

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include "sdkconfig.h"
#include "soc/cpu.h"
#include "hal/cpu_hal.h"
#include "soc/compare_set.h"

#if __XTENSA__
#include "xtensa/xtruntime.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_SPIRAM_WORKAROUND_NEED_VOLATILE_SPINLOCK
#define NEED_VOLATILE_MUX volatile
#else
#define NEED_VOLATILE_MUX
#endif

#define SPINLOCK_FREE          0xB33FFFFF
#define SPINLOCK_WAIT_FOREVER  (-1)
#define SPINLOCK_NO_WAIT       0
#define SPINLOCK_INITIALIZER   {.owner = SPINLOCK_FREE, .count = 0}
#define CORE_ID_REGVAL_XOR_SWAP (0xCDCD ^ 0xABAB)
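
/*
 * Worked example (added for clarity): XOR-ing with this constant maps each
 * core's PRID register value to the other core's. Assuming the usual ESP32
 * values CORE_ID_REGVAL_PRO == 0xCDCD and CORE_ID_REGVAL_APP == 0xABAB:
 *
 *     0xCDCD ^ 0xABAB == 0x6666   (the value of CORE_ID_REGVAL_XOR_SWAP)
 *     0x6666 ^ 0xCDCD == 0xABAB   (PRO core -> APP core)
 *     0x6666 ^ 0xABAB == 0xCDCD   (APP core -> PRO core)
 */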

typedef struct {
    NEED_VOLATILE_MUX uint32_t owner;
    NEED_VOLATILE_MUX uint32_t count;
} spinlock_t;

/**
 * @brief Initialize a lock to its default state - unlocked
 * @param lock - spinlock object to initialize
 */
static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t *lock)
{
    assert(lock);
#if !CONFIG_FREERTOS_UNICORE
    lock->owner = SPINLOCK_FREE;
    lock->count = 0;
#endif
}
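
/*
 * Usage sketch (illustrative; 's_lock' and 'init_example' are hypothetical
 * names): a spinlock may be initialized statically with SPINLOCK_INITIALIZER
 * or at runtime with spinlock_initialize():
 *
 *     static spinlock_t s_lock = SPINLOCK_INITIALIZER;  // static initialization
 *
 *     void init_example(void)
 *     {
 *         spinlock_t lock;
 *         spinlock_initialize(&lock);                   // runtime initialization
 *     }
 */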

/**
 * @brief Top level spinlock acquire function, spins until the lock is acquired
 *
 * This function will:
 * - Save current interrupt state, then disable interrupts
 * - Spin until lock is acquired or until timeout occurs
 * - Restore interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 * function reenables interrupts once the spinlock is acquired). For critical
 * sections, use the interface provided by the operating system.
 * @param lock - target spinlock object
 * @param timeout - cycles to wait, passing SPINLOCK_WAIT_FOREVER blocks indefinitely
 * @return true if the lock was acquired, false if the timeout elapsed first
 */
static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    uint32_t result;
    uint32_t irq_status;
    uint32_t ccount_start;
    uint32_t core_id, other_core_id;

    assert(lock);
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);

    if (timeout != SPINLOCK_WAIT_FOREVER) {
        RSR(CCOUNT, ccount_start);
    }

    /* spin until we own the lock */
    RSR(PRID, core_id);

    /* Note: core_id is the full 32-bit core ID (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) */
    other_core_id = CORE_ID_REGVAL_XOR_SWAP ^ core_id;
    do {
        /* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO,
         * or CORE_ID_REGVAL_APP:
         * - If SPINLOCK_FREE, we want to atomically set it to 'core_id'.
         * - If "our" core_id, we can drop through immediately.
         * - If "other_core_id", we spin here.
         */
        result = core_id;

#if defined(CONFIG_ESP32_SPIRAM_SUPPORT)
        if (esp_ptr_external_ram(lock)) {
            compare_and_set_extram(&lock->owner, SPINLOCK_FREE, &result);
        } else {
#endif
        compare_and_set_native(&lock->owner, SPINLOCK_FREE, &result);
#if defined(CONFIG_ESP32_SPIRAM_SUPPORT)
        }
#endif
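
        /* For reference (a sketch of the semantics, not the implementation):
         * the compare-and-set call behaves like an atomic compare-and-swap
         * that always reads back the previous value, roughly:
         *
         *     uint32_t old = *addr;
         *     if (old == compare) {
         *         *addr = *set;
         *     }
         *     *set = old;
         *
         * so after the call, 'result' holds the previous owner: SPINLOCK_FREE
         * if we just took the lock, our core_id if we already held it, or
         * other_core_id if the other core holds it (keep spinning).
         */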
        if (result != other_core_id) {
            break;
        }

        if (timeout != SPINLOCK_WAIT_FOREVER) {
            uint32_t ccount_now;
            ccount_now = cpu_hal_get_cycle_count();
            if (ccount_now - ccount_start > (unsigned)timeout) {
                XTOS_RESTORE_INTLEVEL(irq_status);
                return false;
            }
        }
    } while (1);

    /* any other value implies memory corruption or an uninitialized mux */
    assert(result == core_id || result == SPINLOCK_FREE);
    assert((result == SPINLOCK_FREE) == (lock->count == 0)); /* we're first to lock iff count is zero */
    assert(lock->count < 0xFF); /* Bad count value implies memory corruption */

    lock->count++;
    XTOS_RESTORE_INTLEVEL(irq_status);
    return true;

#else  // CONFIG_FREERTOS_UNICORE or BOOTLOADER_BUILD
    return true;
#endif
}
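
/*
 * Usage sketch (illustrative; names are hypothetical): acquire, touch the
 * shared state, then release with spinlock_release() (declared below).
 * The timeout is measured in CPU cycles:
 *
 *     static spinlock_t s_lock = SPINLOCK_INITIALIZER;
 *
 *     void update_shared_state(void)
 *     {
 *         if (spinlock_acquire(&s_lock, SPINLOCK_NO_WAIT)) {  // try-lock, no spinning
 *             // ... modify data shared between both cores ...
 *             spinlock_release(&s_lock);
 *         }
 *     }
 *
 * Passing SPINLOCK_WAIT_FOREVER instead of SPINLOCK_NO_WAIT spins until the
 * lock is obtained.
 */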

/**
 * @brief Top level spinlock unlock function, unlocks a previously locked spinlock
 *
 * This function will:
 * - Save current interrupt state, then disable interrupts
 * - Release the spinlock
 * - Restore interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 * function reenables interrupts once the spinlock is released). For critical
 * sections, use the interface provided by the operating system.
 * @param lock - target spinlock object (must be held by the calling core)
 */
static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *lock)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    uint32_t irq_status;
    uint32_t core_id;

    assert(lock);
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);

    RSR(PRID, core_id);
    assert(core_id == lock->owner); // This is a mutex we didn't lock, or it's corrupt
    lock->count--;

    if (!lock->count) {
        lock->owner = SPINLOCK_FREE;
    } else {
        assert(lock->count < 0x100); // Indicates memory corruption
    }

    XTOS_RESTORE_INTLEVEL(irq_status);
#endif
}
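
/*
 * As the notes above say, these spinlocks alone are not critical sections:
 * interrupts are re-enabled once the lock is taken. A sketch of the OS-level
 * interface meant for that purpose, assuming ESP-IDF FreeRTOS:
 *
 *     static portMUX_TYPE s_mux = portMUX_INITIALIZER_UNLOCKED;
 *
 *     portENTER_CRITICAL(&s_mux);
 *     // ... interrupts stay disabled on this core until the exit call ...
 *     portEXIT_CRITICAL(&s_mux);
 */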

#ifdef __cplusplus
}
#endif