rthw.h

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-18     Bernard      the first version
 * 2006-04-25     Bernard      add rt_hw_context_switch_interrupt declaration
 * 2006-09-24     Bernard      add rt_hw_context_switch_to declaration
 * 2012-12-29     Bernard      add rt_hw_exception_install declaration
 * 2017-10-17     Hichard      add some macros
 * 2018-11-17     Jesven       add rt_hw_spinlock_t
 *                             add smp support
 * 2019-05-18     Bernard      add empty definition for not enable cache case
 * 2023-10-16     Shell        Support a new backtrace framework
 */

#ifndef __RT_HW_H__
#define __RT_HW_H__

#include <rtdef.h>

#if defined(RT_USING_CACHE) || defined(RT_USING_SMP)
#include <cpuport.h> /* include spinlock, cache ops, etc. */
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Some macro definitions
 */
#ifndef HWREG64
#define HWREG64(x)          (*((volatile rt_uint64_t *)(x)))
#endif
#ifndef HWREG32
#define HWREG32(x)          (*((volatile rt_uint32_t *)(x)))
#endif
#ifndef HWREG16
#define HWREG16(x)          (*((volatile rt_uint16_t *)(x)))
#endif
#ifndef HWREG8
#define HWREG8(x)           (*((volatile rt_uint8_t *)(x)))
#endif
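
/*
 * Usage sketch (illustrative, not part of this header): the HWREG* macros
 * perform volatile fixed-width accesses to memory-mapped registers. The
 * register address and status bit below are hypothetical values made up
 * for the example.
 *
 *     #define UART0_STATUS_REG    0x40011000UL    // hypothetical register address
 *     #define UART0_TX_READY      (1U << 7)       // hypothetical status bit
 *
 *     static rt_bool_t uart0_tx_ready(void)
 *     {
 *         // volatile 32-bit read of the status register
 *         return (HWREG32(UART0_STATUS_REG) & UART0_TX_READY) ? RT_TRUE : RT_FALSE;
 *     }
 */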

#ifndef RT_CPU_CACHE_LINE_SZ
#define RT_CPU_CACHE_LINE_SZ    32
#endif

enum RT_HW_CACHE_OPS
{
    RT_HW_CACHE_FLUSH      = 0x01,
    RT_HW_CACHE_INVALIDATE = 0x02,
};

/*
 * CPU interfaces
 */
#ifdef RT_USING_CACHE

#ifdef RT_USING_SMART
#include <cache.h>
#endif

void rt_hw_cpu_icache_enable(void);
void rt_hw_cpu_icache_disable(void);
rt_base_t rt_hw_cpu_icache_status(void);
void rt_hw_cpu_icache_ops(int ops, void *addr, int size);

void rt_hw_cpu_dcache_enable(void);
void rt_hw_cpu_dcache_disable(void);
rt_base_t rt_hw_cpu_dcache_status(void);
void rt_hw_cpu_dcache_ops(int ops, void *addr, int size);

#else

/* define cache ops as empty */
#define rt_hw_cpu_icache_enable(...)
#define rt_hw_cpu_icache_disable(...)
#define rt_hw_cpu_icache_ops(...)
#define rt_hw_cpu_dcache_enable(...)
#define rt_hw_cpu_dcache_disable(...)
#define rt_hw_cpu_dcache_ops(...)

#define rt_hw_cpu_icache_status(...)    0
#define rt_hw_cpu_dcache_status(...)    0
#endif
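
/*
 * Usage sketch (illustrative): maintaining cache coherency around a DMA
 * transfer with rt_hw_cpu_dcache_ops(). The buffer, its size, and the DMA
 * start/completion functions are hypothetical; only the cache calls are
 * declared in this header.
 *
 *     void dma_send(void *buf, int size)
 *     {
 *         // write dirty cache lines back to memory before the device reads it
 *         rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, buf, size);
 *         start_dma_tx(buf, size);                    // hypothetical driver call
 *     }
 *
 *     void dma_recv_done(void *buf, int size)
 *     {
 *         // discard stale cache lines before the CPU reads the fresh data
 *         rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, buf, size);
 *     }
 */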

void rt_hw_cpu_reset(void);
void rt_hw_cpu_shutdown(void);

const char *rt_hw_cpu_arch(void);

rt_uint8_t *rt_hw_stack_init(void       *entry,
                             void       *parameter,
                             rt_uint8_t *stack_addr,
                             void       *exit);

/*
 * Interrupt handler definition
 */
typedef void (*rt_isr_handler_t)(int vector, void *param);

struct rt_irq_desc
{
    rt_isr_handler_t handler;
    void            *param;

#ifdef RT_USING_INTERRUPT_INFO
    char             name[RT_NAME_MAX];
    rt_uint32_t      counter;
#endif
};

/*
 * Interrupt interfaces
 */
void rt_hw_interrupt_init(void);
void rt_hw_interrupt_mask(int vector);
void rt_hw_interrupt_umask(int vector);
rt_isr_handler_t rt_hw_interrupt_install(int              vector,
                                         rt_isr_handler_t handler,
                                         void            *param,
                                         const char      *name);
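
/*
 * Usage sketch (illustrative): installing and unmasking an interrupt
 * handler. MY_DEV_VECTOR and struct my_dev are hypothetical; vector
 * numbering is defined by the BSP/interrupt controller port.
 *
 *     static void my_dev_isr(int vector, void *param)
 *     {
 *         struct my_dev *dev = (struct my_dev *)param;   // param passed at install time
 *         // acknowledge the device and keep the handler short
 *     }
 *
 *     void my_dev_irq_setup(struct my_dev *dev)
 *     {
 *         rt_hw_interrupt_install(MY_DEV_VECTOR, my_dev_isr, dev, "my_dev");
 *         rt_hw_interrupt_umask(MY_DEV_VECTOR);          // enable the vector
 *     }
 */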

#ifdef RT_USING_SMP
rt_base_t rt_hw_local_irq_disable(void);
void rt_hw_local_irq_enable(rt_base_t level);

#define rt_hw_interrupt_disable rt_cpus_lock
#define rt_hw_interrupt_enable  rt_cpus_unlock

#else
rt_base_t rt_hw_interrupt_disable(void);
void rt_hw_interrupt_enable(rt_base_t level);
#endif /* RT_USING_SMP */

rt_bool_t rt_hw_interrupt_is_disabled(void);
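
/*
 * Usage sketch (illustrative): a short critical section. On single-core
 * builds this disables and restores local interrupts; on SMP builds the
 * same pair maps to rt_cpus_lock()/rt_cpus_unlock().
 *
 *     rt_base_t level;
 *
 *     level = rt_hw_interrupt_disable();
 *     // ... touch data shared with interrupt handlers ...
 *     rt_hw_interrupt_enable(level);
 */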

/*
 * Context interfaces
 */
#ifdef RT_USING_SMP
void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
void rt_hw_context_switch_to(rt_ubase_t to, struct rt_thread *to_thread);
void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
#else
void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
void rt_hw_context_switch_to(rt_ubase_t to);
void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread);
#endif /* RT_USING_SMP */

/**
 * Hardware Layer Backtrace Service
 */
struct rt_hw_backtrace_frame
{
    rt_base_t fp;
    rt_base_t pc;
};

rt_err_t rt_hw_backtrace_frame_get(rt_thread_t thread, struct rt_hw_backtrace_frame *frame);
rt_err_t rt_hw_backtrace_frame_unwind(rt_thread_t thread, struct rt_hw_backtrace_frame *frame);
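
/*
 * Usage sketch (illustrative): walking a thread's call stack with the
 * backtrace frame API. The assumption that unwinding stops once
 * rt_hw_backtrace_frame_unwind() returns something other than RT_EOK is
 * made for this example; the exact termination condition is defined by the
 * architecture port.
 *
 *     void dump_thread_stack(rt_thread_t thread)
 *     {
 *         struct rt_hw_backtrace_frame frame;
 *
 *         if (rt_hw_backtrace_frame_get(thread, &frame) != RT_EOK)
 *             return;
 *
 *         do
 *         {
 *             rt_kprintf(" 0x%lx", (unsigned long)frame.pc);
 *         } while (rt_hw_backtrace_frame_unwind(thread, &frame) == RT_EOK);
 *         rt_kprintf("\n");
 *     }
 */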

void rt_hw_console_output(const char *str);

void rt_hw_show_memory(rt_uint32_t addr, rt_size_t size);

/*
 * Exception interfaces
 */
void rt_hw_exception_install(rt_err_t (*exception_handle)(void *context));

/*
 * delay interfaces
 */
void rt_hw_us_delay(rt_uint32_t us);

int rt_hw_cpu_id(void);

#if defined(RT_USING_SMP) || defined(RT_USING_AMP)
/**
 * ipi function
 */
void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask);
#endif
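
/*
 * Usage sketch (illustrative): sending an inter-processor interrupt to a
 * set of CPUs. MY_IPI_VECTOR is hypothetical; the available IPI vectors
 * are defined by the architecture/BSP port.
 *
 *     // interrupt CPU 1 and CPU 2 (cpu_mask is a bit mask of target cores)
 *     rt_hw_ipi_send(MY_IPI_VECTOR, (1u << 1) | (1u << 2));
 */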

#ifdef RT_USING_SMP
struct rt_spinlock
{
    rt_hw_spinlock_t lock;
};

void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock);
void rt_hw_spin_lock(rt_hw_spinlock_t *lock);
void rt_hw_spin_unlock(rt_hw_spinlock_t *lock);

extern rt_hw_spinlock_t _cpus_lock;
extern rt_hw_spinlock_t _rt_critical_lock;

#define __RT_HW_SPIN_LOCK_INITIALIZER(lockname) {0}

#define __RT_HW_SPIN_LOCK_UNLOCKED(lockname)    \
    (rt_hw_spinlock_t) __RT_HW_SPIN_LOCK_INITIALIZER(lockname)

#define RT_DEFINE_SPINLOCK(x)  rt_hw_spinlock_t x = __RT_HW_SPIN_LOCK_UNLOCKED(x)
#define RT_DECLARE_SPINLOCK(x)

/**
 * boot secondary cpu
 */
void rt_hw_secondary_cpu_up(void);

/**
 * secondary cpu idle function
 */
void rt_hw_secondary_cpu_idle_exec(void);

#else

#define RT_DEFINE_SPINLOCK(x)   rt_ubase_t x
#define RT_DECLARE_SPINLOCK(x)

#define rt_hw_spin_lock(lock)   *(lock) = rt_hw_interrupt_disable()
#define rt_hw_spin_unlock(lock) rt_hw_interrupt_enable(*(lock))

typedef rt_ubase_t rt_spinlock_t;
struct rt_spinlock
{
    rt_spinlock_t lock;
};
#endif
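
/*
 * Usage sketch (illustrative): a lock that builds on both configurations.
 * On SMP builds RT_DEFINE_SPINLOCK() defines an rt_hw_spinlock_t and
 * rt_hw_spin_lock()/rt_hw_spin_unlock() take the hardware spinlock; on
 * single-core builds the lock degenerates to saving and restoring the
 * interrupt state. Note that on SMP builds rt_hw_spin_lock() by itself
 * does not disable local interrupts.
 *
 *     RT_DEFINE_SPINLOCK(my_lock);
 *
 *     void my_counter_update(void)
 *     {
 *         rt_hw_spin_lock(&my_lock);
 *         // ... short critical section on data shared between cores ...
 *         rt_hw_spin_unlock(&my_lock);
 *     }
 */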

#ifndef RT_USING_CACHE
#define rt_hw_isb()
#define rt_hw_dmb()
#define rt_hw_dsb()
#endif

#ifdef __cplusplus
}
#endif

#endif /* __RT_HW_H__ */