cpu_up.c
  1. /*
  2. * Copyright (c) 2006-2024, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2024-04-19 Shell Fixup UP irq spinlock
  9. * 2024-05-22 Shell Add UP cpu object and
  10. * maintain the rt_current_thread inside it
  11. */
#include <rthw.h>
#include <rtthread.h>

/* The single CPU object of a UP (single-core) build. rt_cpu_self() and
 * rt_cpu_index(0) both resolve to this one static instance. */
static struct rt_cpu _cpu;
  15. /**
  16. * @brief Initialize a static spinlock object.
  17. *
  18. * @param lock is a pointer to the spinlock to initialize.
  19. */
  20. void rt_spin_lock_init(struct rt_spinlock *lock)
  21. {
  22. RT_UNUSED(lock);
  23. }
/**
 * @brief This function will lock the spinlock, will lock the thread scheduler.
 *
 * @note On UP there is no other core to spin against: taking the lock
 *       reduces to disabling preemption via the scheduler critical section.
 *
 * @param lock is a pointer to the spinlock.
 */
void rt_spin_lock(struct rt_spinlock *lock)
{
    /* order matters: enter the critical section first, then record
     * ownership for debug builds (macro compiles out otherwise) */
    rt_enter_critical();
    RT_SPIN_LOCK_DEBUG(lock);
}
  37. /**
  38. * @brief This function will unlock the spinlock, will unlock the thread scheduler.
  39. *
  40. * @note If the scheduling function is called before unlocking, it will be scheduled in this function.
  41. *
  42. * @param lock is a pointer to the spinlock.
  43. */
  44. void rt_spin_unlock(struct rt_spinlock *lock)
  45. {
  46. rt_base_t critical_level;
  47. RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
  48. rt_exit_critical_safe(critical_level);
  49. }
  50. /**
  51. * @brief This function will disable the local interrupt and then lock the spinlock, will lock the thread scheduler.
  52. *
  53. * @note If the spinlock is locked, the current CPU will keep polling the spinlock state
  54. * until the spinlock is unlocked.
  55. *
  56. * @param lock is a pointer to the spinlock.
  57. *
  58. * @return Return current cpu interrupt status.
  59. */
  60. rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
  61. {
  62. rt_base_t level;
  63. RT_UNUSED(lock);
  64. level = rt_hw_interrupt_disable();
  65. rt_enter_critical();
  66. RT_SPIN_LOCK_DEBUG(lock);
  67. return level;
  68. }
  69. /**
  70. * @brief This function will unlock the spinlock and then restore current cpu interrupt status, will unlock the thread scheduler.
  71. *
  72. * @note If the scheduling function is called before unlocking, it will be scheduled in this function.
  73. *
  74. * @param lock is a pointer to the spinlock.
  75. *
  76. * @param level is interrupt status returned by rt_spin_lock_irqsave().
  77. */
  78. void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
  79. {
  80. rt_base_t critical_level;
  81. RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
  82. rt_exit_critical_safe(critical_level);
  83. rt_hw_interrupt_enable(level);
  84. }
/**
 * @brief This function will return current cpu object.
 *
 * @return Return a pointer to the current cpu object.
 *
 * @note UP build: there is exactly one CPU, so this always returns the
 *       single static _cpu instance.
 */
struct rt_cpu *rt_cpu_self(void)
{
    return &_cpu;
}
  94. /**
  95. * @brief This fucntion will return the cpu object corresponding to index.
  96. *
  97. * @param index is the index of target cpu object.
  98. *
  99. * @return Return a pointer to the cpu object corresponding to index.
  100. */
  101. struct rt_cpu *rt_cpu_index(int index)
  102. {
  103. return index == 0 ? &_cpu : RT_NULL;
  104. }