// spinlock.h
// Copyright 2015-2019 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
  14. #pragma once
  15. #include <stdint.h>
  16. #include <stdbool.h>
  17. #include "sdkconfig.h"
  18. #include "soc/cpu.h"
  19. #include "hal/cpu_hal.h"
  20. #include "soc/soc_memory_layout.h"
  21. #include "soc/compare_set.h"
  22. #if __XTENSA__
  23. #include "xtensa/xtruntime.h"
  24. #endif
  25. #ifdef __cplusplus
  26. extern "C" {
  27. #endif
  28. #ifdef CONFIG_SPIRAM_WORKAROUND_NEED_VOLATILE_SPINLOCK
  29. #define NEED_VOLATILE_MUX volatile
  30. #else
  31. #define NEED_VOLATILE_MUX
  32. #endif
  33. #define SPINLOCK_FREE 0xB33FFFFF
  34. #define SPINLOCK_WAIT_FOREVER (-1)
  35. #define SPINLOCK_NO_WAIT 0
  36. #define SPINLOCK_INITIALIZER {.owner = SPINLOCK_FREE,.count = 0}
  37. #define CORE_ID_REGVAL_XOR_SWAP (0xCDCD ^ 0xABAB)
  38. typedef struct {
  39. NEED_VOLATILE_MUX uint32_t owner;
  40. NEED_VOLATILE_MUX uint32_t count;
  41. }spinlock_t;
  42. /**
  43. * @brief Initialize a lock to its default state - unlocked
  44. * @param lock - spinlock object to initialize
  45. */
  46. static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t *lock)
  47. {
  48. assert(lock);
  49. #if !CONFIG_FREERTOS_UNICORE
  50. lock->owner = SPINLOCK_FREE;
  51. lock->count = 0;
  52. #endif
  53. }
  54. /**
  55. * @brief Top level spinlock acquire function, spins until get the lock
  56. * @param lock - target spinlock object
  57. * @param timeout - cycles to wait, passing SPINLOCK_WAIT_FOREVER blocs indefinitely
  58. */
  59. static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
  60. {
  61. #if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
  62. uint32_t result;
  63. uint32_t irq_status;
  64. uint32_t ccount_start;
  65. uint32_t core_id, other_core_id;
  66. assert(lock);
  67. irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
  68. if(timeout != SPINLOCK_WAIT_FOREVER){
  69. RSR(CCOUNT, ccount_start);
  70. }
  71. /*spin until we own a core */
  72. RSR(PRID, core_id);
  73. /* Note: coreID is the full 32 bit core ID (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) */
  74. other_core_id = CORE_ID_REGVAL_XOR_SWAP ^ core_id;
  75. do {
  76. /* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO,
  77. * CORE_ID_REGVAL_APP:
  78. * - If SPINLOCK_FREE, we want to atomically set to 'core_id'.
  79. * - If "our" core_id, we can drop through immediately.
  80. * - If "other_core_id", we spin here.
  81. */
  82. result = core_id;
  83. #if defined(CONFIG_ESP32_SPIRAM_SUPPORT)
  84. if (esp_ptr_external_ram(lock)) {
  85. compare_and_set_extram(&lock->owner, SPINLOCK_FREE, &result);
  86. } else {
  87. #endif
  88. compare_and_set_native(&lock->owner, SPINLOCK_FREE, &result);
  89. #if defined(CONFIG_ESP32_SPIRAM_SUPPORT)
  90. }
  91. #endif
  92. if(result != other_core_id) {
  93. break;
  94. }
  95. if (timeout != SPINLOCK_WAIT_FOREVER) {
  96. uint32_t ccount_now;
  97. ccount_now = cpu_hal_get_cycle_count();
  98. if (ccount_now - ccount_start > (unsigned)timeout) {
  99. XTOS_RESTORE_INTLEVEL(irq_status);
  100. return false;
  101. }
  102. }
  103. }while(1);
  104. /* any other value implies memory corruption or uninitialized mux */
  105. assert(result == core_id || result == SPINLOCK_FREE);
  106. assert((result == SPINLOCK_FREE) == (lock->count == 0)); /* we're first to lock iff count is zero */
  107. assert(lock->count < 0xFF); /* Bad count value implies memory corruption */
  108. lock->count++;
  109. XTOS_RESTORE_INTLEVEL(irq_status);
  110. return true;
  111. #else // !CONFIG_FREERTOS_UNICORE
  112. return true;
  113. #endif
  114. }
  115. /**
  116. * @brief Top level spinlock unlock function, unlocks a previously locked spinlock
  117. * @param lock - target, locked before, spinlock object
  118. */
  119. static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *lock)
  120. {
  121. #if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
  122. uint32_t irq_status;
  123. uint32_t core_id;
  124. assert(lock);
  125. irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
  126. RSR(PRID, core_id);
  127. assert(core_id == lock->owner); // This is a mutex we didn't lock, or it's corrupt
  128. lock->count--;
  129. if(!lock->count) {
  130. lock->owner = SPINLOCK_FREE;
  131. } else {
  132. assert(lock->count < 0x100); // Indicates memory corruption
  133. }
  134. XTOS_RESTORE_INTLEVEL(irq_status);
  135. #endif
  136. }
  137. #ifdef __cplusplus
  138. }
  139. #endif