cpu_up.c

/*
 * Copyright (c) 2006-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-04-19     Shell        Fixup UP irq spinlock
 * 2024-05-22     Shell        Add UP cpu object and
 *                             maintain the rt_current_thread inside it
 */

#include <rthw.h>
#include <rtthread.h>

static struct rt_cpu _cpu;
/**
 * @addtogroup group_thread_comm
 *
 * @cond
 *
 * @{
 */
/**
 * @brief Initialize a static spinlock object.
 *
 * @param lock is a pointer to the spinlock to initialize.
 */
void rt_spin_lock_init(struct rt_spinlock *lock)
{
    RT_UNUSED(lock);
}
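
/* A minimal usage sketch for the initializer above. The lock object and
 * counter below (_demo_lock, _demo_counter) are hypothetical names used only
 * for illustration; a statically allocated spinlock should be initialized
 * once before its first use. */
static struct rt_spinlock _demo_lock;
static volatile int _demo_counter;

static void _demo_lock_setup(void)
{
    rt_spin_lock_init(&_demo_lock);
}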
/**
 * @brief This function will lock the spinlock and also lock the thread scheduler.
 *
 * @note If the spinlock is already locked, the current CPU will keep polling the
 *       spinlock state until the spinlock is unlocked.
 *
 * @param lock is a pointer to the spinlock.
 */
void rt_spin_lock(struct rt_spinlock *lock)
{
    rt_enter_critical();
    RT_SPIN_LOCK_DEBUG(lock);
}
/**
 * @brief This function will unlock the spinlock and also unlock the thread scheduler.
 *
 * @note If the scheduling function was called while the lock was held, the
 *       reschedule takes place in this function.
 *
 * @param lock is a pointer to the spinlock.
 */
void rt_spin_unlock(struct rt_spinlock *lock)
{
    rt_base_t critical_level;

    RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
    rt_exit_critical_safe(critical_level);
}
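
/* A minimal sketch of the lock/unlock pair above, reusing the hypothetical
 * _demo_lock and _demo_counter from the earlier sketch: on UP the spinlock
 * degenerates to a scheduler lock, so the increment cannot be preempted by
 * another thread. */
static void _demo_increment(void)
{
    rt_spin_lock(&_demo_lock);   /* disables preemption via rt_enter_critical() */
    _demo_counter++;             /* shared data is safe from other threads here */
    rt_spin_unlock(&_demo_lock); /* re-enables preemption; a pending reschedule runs now */
}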
/**
 * @brief This function will disable local interrupts, then lock the spinlock and
 *        lock the thread scheduler.
 *
 * @note If the spinlock is already locked, the current CPU will keep polling the
 *       spinlock state until the spinlock is unlocked.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @return Return the CPU interrupt status before interrupts were disabled.
 */
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
{
    rt_base_t level;

    RT_UNUSED(lock);

    level = rt_hw_interrupt_disable();
    rt_enter_critical();
    RT_SPIN_LOCK_DEBUG(lock);

    return level;
}
/**
 * @brief This function will unlock the spinlock and the thread scheduler, then
 *        restore the saved CPU interrupt status.
 *
 * @note If the scheduling function was called while the lock was held, the
 *       reschedule takes place in this function.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @param level is the interrupt status returned by rt_spin_lock_irqsave().
 */
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
{
    rt_base_t critical_level;

    RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
    rt_exit_critical_safe(critical_level);
    rt_hw_interrupt_enable(level);
}
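
/* A sketch of the irqsave variant with the same hypothetical _demo_lock and
 * _demo_counter: the returned level must be kept and passed back unchanged,
 * so the critical section is also protected against interrupt handlers that
 * touch the same data. */
static void _demo_increment_irqsafe(void)
{
    rt_base_t level;

    level = rt_spin_lock_irqsave(&_demo_lock);     /* IRQs off, scheduler locked */
    _demo_counter++;                               /* safe against threads and ISRs */
    rt_spin_unlock_irqrestore(&_demo_lock, level); /* restore the saved IRQ state */
}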
/**
 * @brief This function will return the current cpu object.
 *
 * @return Return a pointer to the current cpu object.
 */
struct rt_cpu *rt_cpu_self(void)
{
    return &_cpu;
}
/**
 * @brief This function will return the cpu object corresponding to index.
 *
 * @param index is the index of the target cpu object.
 *
 * @return Return a pointer to the cpu object corresponding to index, or RT_NULL
 *         if the index is out of range.
 */
struct rt_cpu *rt_cpu_index(int index)
{
    return index == 0 ? &_cpu : RT_NULL;
}
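
/* A sketch showing how the two accessors above relate on UP, relying only on
 * the behavior defined in this file: index 0 names the single CPU object and
 * any other index yields RT_NULL. The function name is hypothetical. */
static void _demo_cpu_lookup(void)
{
    struct rt_cpu *pcpu = rt_cpu_self();

    RT_ASSERT(pcpu == rt_cpu_index(0));    /* the only CPU is index 0 */
    RT_ASSERT(rt_cpu_index(1) == RT_NULL); /* other indexes do not exist on UP */
}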
/**
 * @}
 *
 * @endcond
 */