/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#pragma once

#include <assert.h>
#include <stdint.h>
#include <stdbool.h>
#include "sdkconfig.h"
#include "soc/cpu.h"
#include "hal/cpu_hal.h"
#include "soc/compare_set.h"

#if __XTENSA__
#include "xtensa/xtruntime.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_SPIRAM_WORKAROUND_NEED_VOLATILE_SPINLOCK
#define NEED_VOLATILE_MUX volatile
#else
#define NEED_VOLATILE_MUX
#endif

#define SPINLOCK_FREE          0xB33FFFFF
#define SPINLOCK_WAIT_FOREVER  (-1)
#define SPINLOCK_NO_WAIT       0
#define SPINLOCK_INITIALIZER   {.owner = SPINLOCK_FREE,.count = 0}
#define CORE_ID_REGVAL_XOR_SWAP (0xCDCD ^ 0xABAB)
  29. typedef struct {
  30. NEED_VOLATILE_MUX uint32_t owner;
  31. NEED_VOLATILE_MUX uint32_t count;
  32. }spinlock_t;
  33. /**
  34. * @brief Initialize a lock to its default state - unlocked
  35. * @param lock - spinlock object to initialize
  36. */
  37. static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t *lock)
  38. {
  39. assert(lock);
  40. #if !CONFIG_FREERTOS_UNICORE
  41. lock->owner = SPINLOCK_FREE;
  42. lock->count = 0;
  43. #endif
  44. }
  45. /**
  46. * @brief Top level spinlock acquire function, spins until get the lock
  47. * @param lock - target spinlock object
  48. * @param timeout - cycles to wait, passing SPINLOCK_WAIT_FOREVER blocs indefinitely
  49. */
  50. static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
  51. {
  52. #if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
  53. uint32_t result;
  54. uint32_t irq_status;
  55. uint32_t ccount_start;
  56. uint32_t core_id, other_core_id;
  57. assert(lock);
  58. irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
  59. if(timeout != SPINLOCK_WAIT_FOREVER){
  60. RSR(CCOUNT, ccount_start);
  61. }
  62. /*spin until we own a core */
  63. RSR(PRID, core_id);
  64. /* Note: coreID is the full 32 bit core ID (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) */
  65. other_core_id = CORE_ID_REGVAL_XOR_SWAP ^ core_id;
  66. do {
  67. /* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO,
  68. * CORE_ID_REGVAL_APP:
  69. * - If SPINLOCK_FREE, we want to atomically set to 'core_id'.
  70. * - If "our" core_id, we can drop through immediately.
  71. * - If "other_core_id", we spin here.
  72. */
  73. result = core_id;
  74. #if defined(CONFIG_ESP32_SPIRAM_SUPPORT)
  75. if (esp_ptr_external_ram(lock)) {
  76. compare_and_set_extram(&lock->owner, SPINLOCK_FREE, &result);
  77. } else {
  78. #endif
  79. compare_and_set_native(&lock->owner, SPINLOCK_FREE, &result);
  80. #if defined(CONFIG_ESP32_SPIRAM_SUPPORT)
  81. }
  82. #endif
  83. if(result != other_core_id) {
  84. break;
  85. }
  86. if (timeout != SPINLOCK_WAIT_FOREVER) {
  87. uint32_t ccount_now;
  88. ccount_now = cpu_hal_get_cycle_count();
  89. if (ccount_now - ccount_start > (unsigned)timeout) {
  90. XTOS_RESTORE_INTLEVEL(irq_status);
  91. return false;
  92. }
  93. }
  94. }while(1);
  95. /* any other value implies memory corruption or uninitialized mux */
  96. assert(result == core_id || result == SPINLOCK_FREE);
  97. assert((result == SPINLOCK_FREE) == (lock->count == 0)); /* we're first to lock iff count is zero */
  98. assert(lock->count < 0xFF); /* Bad count value implies memory corruption */
  99. lock->count++;
  100. XTOS_RESTORE_INTLEVEL(irq_status);
  101. return true;
  102. #else // !CONFIG_FREERTOS_UNICORE
  103. return true;
  104. #endif
  105. }
  106. /**
  107. * @brief Top level spinlock unlock function, unlocks a previously locked spinlock
  108. * @param lock - target, locked before, spinlock object
  109. */
  110. static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *lock)
  111. {
  112. #if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
  113. uint32_t irq_status;
  114. uint32_t core_id;
  115. assert(lock);
  116. irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
  117. RSR(PRID, core_id);
  118. assert(core_id == lock->owner); // This is a mutex we didn't lock, or it's corrupt
  119. lock->count--;
  120. if(!lock->count) {
  121. lock->owner = SPINLOCK_FREE;
  122. } else {
  123. assert(lock->count < 0x100); // Indicates memory corruption
  124. }
  125. XTOS_RESTORE_INTLEVEL(irq_status);
  126. #endif
  127. }
#ifdef __cplusplus
}
#endif