/* rt_HAL_CM.h */
/*----------------------------------------------------------------------------
 * CMSIS-RTOS - RTX
 *----------------------------------------------------------------------------
 * Name:    RT_HAL_CM.H
 * Purpose: Hardware Abstraction Layer for Cortex-M definitions
 * Rev.:    V4.79
 *----------------------------------------------------------------------------
 *
 * Copyright (c) 1999-2009 KEIL, 2009-2017 ARM Germany GmbH. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *---------------------------------------------------------------------------*/
/* Definitions */
#define INITIAL_xPSR  0x01000000U   /* xPSR for a fresh task frame: only the Thumb bit set */
#define DEMCR_TRCENA  0x01000000U   /* DEMCR.TRCENA: enables DWT/ITM trace */
#define ITM_ITMENA    0x00000001U   /* ITM control: global ITM enable bit */
#define MAGIC_WORD    0xE25A2EA5U   /* stack marker — presumably checked for stack overflow elsewhere (not visible here) */
#define MAGIC_PATTERN 0xCCCCCCCCU   /* stack fill pattern — presumably for stack-usage watermarking; confirm against init code */
#if defined (__CC_ARM)                                  /* ARM Compiler (armcc) */

/* LDREX/STREX exclusive-access primitives exist only on ARMv7-M / ARMv7E-M;
   NO_EXCLUSIVE_ACCESS lets a build force the IRQ-masking fallback instead. */
#if ((defined(__TARGET_ARCH_7_M) || defined(__TARGET_ARCH_7E_M)) && !defined(NO_EXCLUSIVE_ACCESS))
#define __USE_EXCLUSIVE_ACCESS
#else
#undef  __USE_EXCLUSIVE_ACCESS
#endif

#ifndef __CMSIS_GENERIC
/* Full data memory barrier, fenced with compiler scheduling barriers so the
   compiler cannot move memory accesses across the DMB. */
#define __DMB() do {\
__schedule_barrier();\
__dmb(0xF);\
__schedule_barrier();\
} while (0)
#endif

#elif defined (__GNUC__)                                /* GNU Compiler */

#undef  __USE_EXCLUSIVE_ACCESS                          /* GCC path always uses IRQ masking */

#if defined (__CORTEX_M0)
#define __TARGET_ARCH_6S_M                              /* Cortex-M0 -> ARMv6-M subset */
#endif

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
#define __TARGET_FPU_VFP                                /* hardware FP ABI in use */
#endif

#define __inline inline
#define __weak   __attribute__((weak))

#ifndef __CMSIS_GENERIC

/* Enable interrupts by clearing PRIMASK. */
__attribute__((always_inline)) static inline void __enable_irq(void)
{
  __asm volatile ("cpsie i");
}

/* Disable interrupts; returns the previous PRIMASK bit
   (1 = interrupts were already disabled). */
__attribute__((always_inline)) static inline U32 __disable_irq(void)
{
  U32 result;
  __asm volatile ("mrs %0, primask" : "=r" (result));
  __asm volatile ("cpsid i");
  return(result & 1);
}

/* Data memory barrier; the "memory" clobber also acts as a compiler barrier. */
__attribute__((always_inline)) static inline void __DMB(void)
{
  __asm volatile ("dmb 0xF":::"memory");
}
#endif

/* Count leading zeros of 'value' (CLZ instruction). */
__attribute__(( always_inline)) static inline U8 __clz(U32 value)
{
  U8 result;
  __asm volatile ("clz %0, %1" : "=r" (result) : "r" (value));
  return(result);
}

#elif defined (__ICCARM__)                              /* IAR Compiler */

#undef  __USE_EXCLUSIVE_ACCESS                          /* IAR path always uses IRQ masking */

#if (__CORE__ == __ARM6M__)
#define __TARGET_ARCH_6S_M 1
#endif

#if defined __ARMVFP__
#define __TARGET_FPU_VFP 1
#endif

#define __inline inline

#ifndef __CMSIS_GENERIC

/* Enable interrupts by clearing PRIMASK. */
static inline void __enable_irq(void)
{
  __asm volatile ("cpsie i");
}

/* Disable interrupts; returns the previous PRIMASK bit. */
static inline U32 __disable_irq(void)
{
  U32 result;
  __asm volatile ("mrs %0, primask" : "=r" (result));
  __asm volatile ("cpsid i");
  return(result & 1);
}
#endif

/* Count leading zeros of 'value' (CLZ instruction). */
static inline U8 __clz(U32 value)
{
  U8 result;
  __asm volatile ("clz %0, %1" : "=r" (result) : "r" (value));
  return(result);
}
#endif
  106. /* NVIC registers */
  107. #define NVIC_ST_CTRL (*((volatile U32 *)0xE000E010U))
  108. #define NVIC_ST_RELOAD (*((volatile U32 *)0xE000E014U))
  109. #define NVIC_ST_CURRENT (*((volatile U32 *)0xE000E018U))
  110. #define NVIC_ISER ((volatile U32 *)0xE000E100U)
  111. #define NVIC_ICER ((volatile U32 *)0xE000E180U)
  112. #if defined(__TARGET_ARCH_6S_M)
  113. #define NVIC_IP ((volatile U32 *)0xE000E400U)
  114. #else
  115. #define NVIC_IP ((volatile U8 *)0xE000E400U)
  116. #endif
  117. #define NVIC_INT_CTRL (*((volatile U32 *)0xE000ED04U))
  118. #define NVIC_AIR_CTRL (*((volatile U32 *)0xE000ED0CU))
  119. #define NVIC_SYS_PRI2 (*((volatile U32 *)0xE000ED1CU))
  120. #define NVIC_SYS_PRI3 (*((volatile U32 *)0xE000ED20U))
  121. #define OS_PEND_IRQ() NVIC_INT_CTRL = (1UL<<28)
  122. #define OS_PENDING ((NVIC_INT_CTRL >> 26) & 5U)
  123. #define OS_UNPEND(fl) NVIC_INT_CTRL = (U32)(fl = (U8)OS_PENDING) << 25
  124. #define OS_PEND(fl,p) NVIC_INT_CTRL = (U32)(fl | (U8)(p<<2)) << 26
  125. #define OS_LOCK() NVIC_ST_CTRL = 0x0005U
  126. #define OS_UNLOCK() NVIC_ST_CTRL = 0x0007U
  127. #define OS_X_PENDING ((NVIC_INT_CTRL >> 28) & 1U)
  128. #define OS_X_UNPEND(fl) NVIC_INT_CTRL = (U32)(fl = (U8)OS_X_PENDING) << 27
  129. #define OS_X_PEND(fl,p) NVIC_INT_CTRL = (U32)(fl | p) << 28
  130. #if defined(__TARGET_ARCH_6S_M)
  131. #define OS_X_INIT(n) NVIC_IP[n>>2] |= (U32)0xFFU << ((n & 0x03U) << 3); \
  132. NVIC_ISER[n>>5] = (U32)1U << (n & 0x1FU)
  133. #else
  134. #define OS_X_INIT(n) NVIC_IP[n] = 0xFFU; \
  135. NVIC_ISER[n>>5] = (U32)1U << (n & 0x1FU)
  136. #endif
  137. #define OS_X_LOCK(n) NVIC_ICER[n>>5] = (U32)1U << (n & 0x1FU)
  138. #define OS_X_UNLOCK(n) NVIC_ISER[n>>5] = (U32)1U << (n & 0x1FU)
/* Core Debug registers */
#define DEMCR          (*((volatile U32 *)0xE000EDFCU))   /* Debug Exception & Monitor Control */

/* ITM registers */
#define ITM_CONTROL    (*((volatile U32 *)0xE0000E80U))   /* ITM Trace Control */
#define ITM_ENABLE     (*((volatile U32 *)0xE0000E00U))   /* ITM Trace Enable (stimulus ports) */
#define ITM_PORT30_U32 (*((volatile U32 *)0xE0000078U))   /* stimulus port 30, word access */
#define ITM_PORT31_U32 (*((volatile U32 *)0xE000007CU))   /* stimulus port 31, word access */
#define ITM_PORT31_U16 (*((volatile U16 *)0xE000007CU))   /* stimulus port 31, half-word access */
#define ITM_PORT31_U8  (*((volatile U8 *)0xE000007CU))    /* stimulus port 31, byte access */
  148. /* Variables */
  149. extern BIT dbg_msg;
  150. /* Functions */
  151. #ifdef __USE_EXCLUSIVE_ACCESS
  152. #define rt_inc(p) while(__strex((__ldrex(p)+1U),p))
  153. #define rt_dec(p) while(__strex((__ldrex(p)-1U),p))
  154. #else
  155. #define rt_inc(p) __disable_irq();(*p)++;__enable_irq();
  156. #define rt_dec(p) __disable_irq();(*p)--;__enable_irq();
  157. #endif
  158. __inline static U32 rt_inc_qi (U32 size, U8 *count, U8 *first) {
  159. U32 cnt,c2;
  160. #ifdef __USE_EXCLUSIVE_ACCESS
  161. do {
  162. if ((cnt = __ldrex(count)) == size) {
  163. __clrex();
  164. return (cnt); }
  165. } while (__strex(cnt+1U, count));
  166. do {
  167. c2 = (cnt = __ldrex(first)) + 1U;
  168. if (c2 == size) { c2 = 0U; }
  169. } while (__strex(c2, first));
  170. #else
  171. __disable_irq();
  172. if ((cnt = *count) < size) {
  173. *count = (U8)(cnt+1U);
  174. c2 = (cnt = *first) + 1U;
  175. if (c2 == size) { c2 = 0U; }
  176. *first = (U8)c2;
  177. }
  178. __enable_irq ();
  179. #endif
  180. return (cnt);
  181. }
/* Start the SysTick timer that drives the OS tick.
 * 'os_trv' (declared elsewhere) holds the tick reload value. */
__inline static void rt_systick_init (void) {
  NVIC_ST_RELOAD  = os_trv;
  NVIC_ST_CURRENT = 0U;                 /* clear so the first period is full */
  NVIC_ST_CTRL    = 0x0007U;            /* enable counter + interrupt, core clock */
  NVIC_SYS_PRI3  |= 0xFF000000U;        /* SysTick priority -> 0xFF (lowest) */
}
/* Elapsed time within the current tick period.
 * SysTick counts DOWN from os_trv, so elapsed = reload - current. */
__inline static U32 rt_systick_val (void) {
  return (os_trv - NVIC_ST_CURRENT);
}
/* Returns 1 if a SysTick interrupt is pending (ICSR.PENDSTSET, bit 26),
 * i.e. the timer wrapped but the tick has not been serviced yet. */
__inline static U32 rt_systick_ovf (void) {
  return ((NVIC_INT_CTRL >> 26) & 1U);
}
/* Configure kernel exception priorities: PendSV gets the lowest priority
 * and SVCall is placed one priority group above it, honoring the number of
 * implemented priority bits and the current PRIGROUP setting. */
__inline static void rt_svc_init (void) {
#if !defined(__TARGET_ARCH_6S_M)
  U32 sh,prigroup;
#endif
  NVIC_SYS_PRI3 |= 0x00FF0000U;         /* PendSV priority -> 0xFF (lowest) */
#if defined(__TARGET_ARCH_6S_M)
  /* v6-M: derive the SVCall priority byte in SHPR2 from the PendSV byte;
     only the top 2 priority bits are implemented (hence the 0xFC mask). */
  NVIC_SYS_PRI2 |= (NVIC_SYS_PRI3<<(8+1)) & 0xFC000000U;
#else
  /* Count implemented priority bits: write of 0xFF above left only the
     implemented bits set, so CLZ of the inverted byte yields the width. */
  sh = 8U - __clz(~((NVIC_SYS_PRI3 << 8) & 0xFF000000U));
  prigroup = ((NVIC_AIR_CTRL >> 8) & 0x07U);
  if (prigroup >= sh) {
    sh = prigroup + 1U;                 /* respect preempt-group boundary */
  }
  /* SVCall byte (top of SHPR2): one group above lowest; other bytes kept. */
  NVIC_SYS_PRI2 = ((0xFEFFFFFFU << sh) & 0xFF000000U) | (NVIC_SYS_PRI2 & 0x00FFFFFFU);
#endif
}
/* HAL entry points implemented elsewhere (assembly/port files). */
extern void rt_set_PSP (U32 stack);                    /* set Process Stack Pointer */
extern U32  rt_get_PSP (void);                         /* read Process Stack Pointer */
extern void os_set_env (void);                         /* switch to the thread-mode stack environment — see port implementation */
extern void *_alloc_box (void *box_mem);               /* allocate a block from a memory-box pool */
extern U32  _free_box (void *box_mem, void *box);      /* return a block to its memory-box pool */

extern void rt_init_stack (P_TCB p_TCB, FUNCP task_body);  /* build the initial stack frame of a task */
extern void rt_ret_val (P_TCB p_TCB, U32 v0);              /* store one return value into a task's frame */
extern void rt_ret_val2 (P_TCB p_TCB, U32 v0, U32 v1);     /* store two return values into a task's frame */

extern void dbg_init (void);                           /* initialize debug/ITM event output */
extern void dbg_task_notify (P_TCB p_tcb, BOOL create);/* report task create/delete to the debugger */
extern void dbg_task_switch (U32 task_id);             /* report a task switch to the debugger */
  221. #ifdef DBG_MSG
  222. #define DBG_INIT() dbg_init()
  223. #define DBG_TASK_NOTIFY(p_tcb,create) if (dbg_msg) dbg_task_notify(p_tcb,create)
  224. #define DBG_TASK_SWITCH(task_id) if (dbg_msg && (os_tsk.next!=os_tsk.run)) \
  225. dbg_task_switch(task_id)
  226. #else
  227. #define DBG_INIT()
  228. #define DBG_TASK_NOTIFY(p_tcb,create)
  229. #define DBG_TASK_SWITCH(task_id)
  230. #endif
/*----------------------------------------------------------------------------
 * end of file
 *---------------------------------------------------------------------------*/