- /*
- * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
- *
- * SPDX-License-Identifier: Apache-2.0
- */
- #pragma once
- #include "sdkconfig.h"
- #include <stdint.h>
- #include <stdbool.h>
- #include <assert.h>
- #include "esp_cpu.h"
- #if __XTENSA__
- #include "xtensa/xtruntime.h"
- #include "xt_utils.h"
- #endif
- #ifdef __cplusplus
- extern "C" {
- #endif
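- // When the SPIRAM cache workaround requires it (CONFIG_SPIRAM_WORKAROUND_NEED_VOLATILE_SPINLOCK), the spinlock fields are declared volatile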
- #ifdef CONFIG_SPIRAM_WORKAROUND_NEED_VOLATILE_SPINLOCK
- #define NEED_VOLATILE_MUX volatile
- #else
- #define NEED_VOLATILE_MUX
- #endif
- #define SPINLOCK_FREE 0xB33FFFFF
- #define SPINLOCK_WAIT_FOREVER (-1)
- #define SPINLOCK_NO_WAIT 0
- #define SPINLOCK_INITIALIZER {.owner = SPINLOCK_FREE, .count = 0}
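- // XORing this constant with one raw core ID register value (CORE_ID_REGVAL_PRO or CORE_ID_REGVAL_APP) yields the other core's value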
- #define CORE_ID_REGVAL_XOR_SWAP (0xCDCD ^ 0xABAB)
- typedef struct {
- NEED_VOLATILE_MUX uint32_t owner; // SPINLOCK_FREE when unlocked, otherwise the raw core ID register value of the owning core
- NEED_VOLATILE_MUX uint32_t count; // Recursive acquisition (nesting) count held by the owning core
- } spinlock_t;
- /**
- * @brief Initialize a lock to its default state - unlocked
- * @param lock - spinlock object to initialize
- */
- static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t *lock)
- {
- assert(lock);
- #if !CONFIG_FREERTOS_UNICORE
- lock->owner = SPINLOCK_FREE;
- lock->count = 0;
- #endif
- }
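- /*
-  * Example (illustrative sketch, not part of this header): a spinlock can be set up either with the
-  * static initializer or by calling spinlock_initialize() at run time. The names below are hypothetical.
-  *
-  *     static spinlock_t s_static_lock = SPINLOCK_INITIALIZER; // ready to use immediately
-  *
-  *     static spinlock_t s_runtime_lock;
-  *     void my_module_init(void)
-  *     {
-  *         spinlock_initialize(&s_runtime_lock); // equivalent to the static initializer
-  *     }
-  */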
- /**
- * @brief Top level spinlock acquire function, spins until the lock is acquired
- *
- * This function will:
- * - Save current interrupt state, then disable interrupts
- * - Spin until lock is acquired or until timeout occurs
- * - Restore interrupt state
- *
- * @note Spinlocks alone do not constitute true critical sections (as this
- * function re-enables interrupts once the spinlock is acquired). For critical
- * sections, use the interface provided by the operating system.
- * @param lock - target spinlock object
- * @param timeout - cycles to wait; passing SPINLOCK_WAIT_FOREVER blocks indefinitely
- * @return true if the lock was acquired, false if it could not be acquired before the timeout
- */
- static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
- {
- #if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
- uint32_t irq_status;
- uint32_t core_id, other_core_id;
- bool lock_set;
- esp_cpu_cycle_count_t start_count;
- assert(lock);
- irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
- // Note: The core IDs are the full 32 bit (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) values
- core_id = xt_utils_get_raw_core_id();
- other_core_id = CORE_ID_REGVAL_XOR_SWAP ^ core_id;
- /* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO,
- * CORE_ID_REGVAL_APP:
- * - If SPINLOCK_FREE, we want to atomically set to 'core_id'.
- * - If "our" core_id, we can drop through immediately.
- * - If "other_core_id", we spin here.
- */
- // The caller is already the owner of the lock. Simply increment the nesting count
- if (lock->owner == core_id) {
- assert(lock->count > 0 && lock->count < 0xFF); // Bad count value implies memory corruption
- lock->count++;
- XTOS_RESTORE_INTLEVEL(irq_status);
- return true;
- }
- /* First attempt to take the lock.
- *
- * Note: We make the first attempt separately (instead of folding it into the loop below) in order to avoid a
- * call to esp_cpu_get_cycle_count(). This makes acquiring a free lock quicker, which is the common case for
- * spinlock_acquire() calls (spinlocks are free most of the time since they aren't meant to be held for long).
- */
- lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_id);
- if (lock_set || timeout == SPINLOCK_NO_WAIT) {
- // We've successfully taken the lock, or we are not retrying
- goto exit;
- }
- // First attempt to take the lock has failed. Retry until the lock is taken, or until we timeout.
- start_count = esp_cpu_get_cycle_count();
- do {
- lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_id);
- if (lock_set) {
- break;
- }
- // Keep looping if we are waiting forever, or check if we have timed out
- } while ((timeout == SPINLOCK_WAIT_FOREVER) || (esp_cpu_get_cycle_count() - start_count) <= timeout);
- exit:
- if (lock_set) {
- assert(lock->owner == core_id);
- assert(lock->count == 0); // This is the first time the lock is set, so count should still be 0
- lock->count++; // Finally, we increment the lock count
- } else { // We timed out waiting for lock
- assert(lock->owner == SPINLOCK_FREE || lock->owner == other_core_id);
- assert(lock->count < 0xFF); // Bad count value implies memory corruption
- }
- XTOS_RESTORE_INTLEVEL(irq_status);
- return lock_set;
- #else // CONFIG_FREERTOS_UNICORE || BOOTLOADER_BUILD
- return true;
- #endif
- }
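- /*
-  * Example (illustrative sketch, not part of this header): blocking and non-blocking acquisition.
-  * The names below are hypothetical.
-  *
-  *     static spinlock_t s_lock = SPINLOCK_INITIALIZER;
-  *
-  *     void write_shared_data(void)
-  *     {
-  *         spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER); // spin until the lock is ours
-  *         // ... access data shared between the two cores ...
-  *         spinlock_release(&s_lock);
-  *     }
-  *
-  *     bool try_write_shared_data(void)
-  *     {
-  *         if (!spinlock_acquire(&s_lock, SPINLOCK_NO_WAIT)) { // single attempt, no spinning
-  *             return false; // the other core currently holds the lock
-  *         }
-  *         // ... access data shared between the two cores ...
-  *         spinlock_release(&s_lock);
-  *         return true;
-  *     }
-  */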
- /**
- * @brief Top level spinlock unlock function, unlocks a previously locked spinlock
- *
- * This function will:
- * - Save current interrupt state, then disable interrupts
- * - Release the spinlock
- * - Restore interrupt state
- *
- * @note Spinlocks alone do not constitute true critical sections (as this
- * function re-enables interrupts once the spinlock is released). For critical
- * sections, use the interface provided by the operating system.
- * @param lock - target spinlock object (must have been previously acquired by the calling core)
- */
- static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *lock)
- {
- #if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
- uint32_t irq_status;
- uint32_t core_id;
- assert(lock);
- irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
- core_id = xt_utils_get_raw_core_id();
- assert(core_id == lock->owner); // This is a lock that we didn't acquire, or the lock is corrupt
- lock->count--;
- if (!lock->count) { // If this is the last recursive release of the lock, mark the lock as free
- lock->owner = SPINLOCK_FREE;
- } else {
- assert(lock->count < 0x100); // Indicates memory corruption
- }
- XTOS_RESTORE_INTLEVEL(irq_status);
- #endif
- }
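- /*
-  * Example (illustrative sketch): acquisition is recursive on the owning core, so nested
-  * acquire/release pairs are allowed; the lock only becomes free again once the count
-  * returns to zero. The lock name is hypothetical.
-  *
-  *     spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER); // count: 0 -> 1, owner = this core
-  *     spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER); // same core re-enters, count: 1 -> 2
-  *     spinlock_release(&s_lock);                        // count: 2 -> 1, still owned
-  *     spinlock_release(&s_lock);                        // count: 1 -> 0, owner = SPINLOCK_FREE
-  */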
- #ifdef __cplusplus
- }
- #endif
|