/**************************************************************************//**
 * @file     cmsis_gcc.h
 * @brief    CMSIS compiler GCC header file
 * @version  V1.3.3
 * @date     13. November 2022
 ******************************************************************************/
/*
 * Copyright (c) 2009-2022 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __CMSIS_GCC_H
#define __CMSIS_GCC_H

/* ignore some GCC warnings */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-parameter"

/* Fallback for __has_builtin */
#ifndef __has_builtin
  #define __has_builtin(x) (0)
#endif

/* CMSIS compiler specific defines */
#ifndef __ASM
  #define __ASM                 __asm
#endif
#ifndef __INLINE
  #define __INLINE              inline
#endif
#ifndef __FORCEINLINE
  #define __FORCEINLINE         __attribute__((always_inline))
#endif
#ifndef __STATIC_INLINE
  #define __STATIC_INLINE       static inline
#endif
#ifndef __STATIC_FORCEINLINE
  #define __STATIC_FORCEINLINE  __attribute__((always_inline)) static inline
#endif
#ifndef __NO_RETURN
  #define __NO_RETURN           __attribute__((__noreturn__))
#endif
#ifndef CMSIS_DEPRECATED
  #define CMSIS_DEPRECATED      __attribute__((deprecated))
#endif
#ifndef __USED
  #define __USED                __attribute__((used))
#endif
#ifndef __WEAK
  #define __WEAK                __attribute__((weak))
#endif
#ifndef __PACKED
  #define __PACKED              __attribute__((packed, aligned(1)))
#endif
#ifndef __PACKED_STRUCT
  #define __PACKED_STRUCT       struct __attribute__((packed, aligned(1)))
#endif

#ifndef __UNALIGNED_UINT16_WRITE
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  #pragma GCC diagnostic ignored "-Wattributes"
  __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT16_READ
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  #pragma GCC diagnostic ignored "-Wattributes"
  __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef __UNALIGNED_UINT32_WRITE
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  #pragma GCC diagnostic ignored "-Wattributes"
  __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT32_READ
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  #pragma GCC diagnostic ignored "-Wattributes"
  __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
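
/*
  Usage sketch (illustrative, not part of the original header): the packed
  structs above let the compiler emit safe accesses to addresses that may not
  be naturally aligned. 'buf' and the offsets below are assumptions made for
  the example only:

      uint8_t  buf[8];
      uint32_t w = __UNALIGNED_UINT32_READ(&buf[1]);        // 32-bit read at an odd address
      __UNALIGNED_UINT16_WRITE(&buf[3], (uint16_t)0xABCDU); // 16-bit write at an odd address
*/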
#ifndef __ALIGNED
  #define __ALIGNED(x)          __attribute__((aligned(x)))
#endif
#ifndef __RESTRICT
  #define __RESTRICT            __restrict
#endif
#ifndef __COMPILER_BARRIER
  #define __COMPILER_BARRIER()  __ASM volatile("":::"memory")
#endif

/* ##########################  Core Instruction Access  ######################### */
/**
  \brief   No Operation
  \details No Operation does nothing. This instruction can be used for code alignment purposes.
 */
#define __NOP()  __ASM volatile ("nop")

/**
  \brief   Wait For Interrupt
  \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
 */
#define __WFI()  __ASM volatile ("wfi":::"memory")

/**
  \brief   Wait For Event
  \details Wait For Event is a hint instruction that permits the processor to enter
           a low-power state until one of a number of events occurs.
 */
#define __WFE()  __ASM volatile ("wfe":::"memory")

/**
  \brief   Send Event
  \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
 */
#define __SEV()  __ASM volatile ("sev")

/**
  \brief   Instruction Synchronization Barrier
  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
           so that all instructions following the ISB are fetched from cache or memory,
           after the instruction has been completed.
 */
__STATIC_FORCEINLINE void __ISB(void)
{
  __ASM volatile ("isb 0xF":::"memory");
}

/**
  \brief   Data Synchronization Barrier
  \details Acts as a special kind of Data Memory Barrier.
           It completes when all explicit memory accesses before this instruction complete.
 */
__STATIC_FORCEINLINE void __DSB(void)
{
  __ASM volatile ("dsb 0xF":::"memory");
}

/**
  \brief   Data Memory Barrier
  \details Ensures the apparent order of the explicit memory operations before
           and after the instruction, without ensuring their completion.
 */
__STATIC_FORCEINLINE void __DMB(void)
{
  __ASM volatile ("dmb 0xF":::"memory");
}

/**
  \brief   Reverse byte order (32 bit)
  \details Reverses the byte order in an unsigned integer value. For example, 0x12345678 becomes 0x78563412.
  \param [in] value  Value to reverse
  \return            Reversed value
 */
__STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
  return __builtin_bswap32(value);
#else
  uint32_t result;
  __ASM ("rev %0, %1" : "=r" (result) : "r" (value) );
  return result;
#endif
}

/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
  \param [in] value  Value to reverse
  \return            Reversed value
 */
__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
{
  uint32_t result;
  __ASM ("rev16 %0, %1" : "=r" (result) : "r" (value));
  return result;
}

/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
  \param [in] value  Value to reverse
  \return            Reversed value
 */
__STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  return (int16_t)__builtin_bswap16(value);
#else
  int16_t result;
  __ASM ("revsh %0, %1" : "=r" (result) : "r" (value) );
  return result;
#endif
}

/**
  \brief   Rotate Right in unsigned value (32 bit)
  \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
  \param [in] op1  Value to rotate
  \param [in] op2  Number of Bits to rotate
  \return          Rotated value
 */
__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
  op2 %= 32U;
  if (op2 == 0U)
  {
    return op1;
  }
  return (op1 >> op2) | (op1 << (32U - op2));
}
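
/*
  Worked example (illustrative): rotating right by 8 moves the least significant
  byte to the top, e.g. __ROR(0x12345678U, 8U) == 0x78123456U, and
  __ROR(x, 0U) returns x unchanged because of the explicit zero check above.
*/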

/**
  \brief   Breakpoint
  \details Causes the processor to enter Debug state.
           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
  \param [in] value  is ignored by the processor.
                     If required, a debugger can use it to store additional information about the breakpoint.
 */
#define __BKPT(value)  __ASM volatile ("bkpt "#value)

/**
  \brief   Reverse bit order of value
  \details Reverses the bit order of the given value.
  \param [in] value  Value to reverse
  \return            Reversed value
 */
__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
{
  uint32_t result;
  __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) );
  return result;
}

/**
  \brief   Count leading zeros
  \details Counts the number of leading zeros of a data value.
  \param [in] value  Value to count the leading zeros
  \return            number of leading zeros in value
 */
__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
{
  /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
     __builtin_clz(0) is undefined behaviour, so handle this case specially.
     This guarantees ARM-compatible results if happening to compile on a non-ARM
     target, and ensures the compiler doesn't decide to activate any
     optimisations using the logic "value was passed to __builtin_clz, so it
     is non-zero".
     ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
     single CLZ instruction.
   */
  if (value == 0U)
  {
    return 32U;
  }
  return __builtin_clz(value);
}
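
/*
  Usage sketch (illustrative, not part of the original header): __CLZ gives the
  position of the most significant set bit, so a 32-bit integer log2 can be
  derived from it. 'ilog2' is a hypothetical helper name for the example:

      static inline uint32_t ilog2(uint32_t x)  // assumes x != 0
      {
        return 31U - __CLZ(x);                  // floor(log2(x))
      }

  For reference, __CLZ(1U) == 31U and __CLZ(0x80000000U) == 0U; the zero input
  is handled above by returning 32U.
*/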

/**
  \brief   LDR Exclusive (8 bit)
  \details Executes an exclusive LDR instruction for 8 bit values.
  \param [in] addr  Pointer to data
  \return           value of type uint8_t at (*addr)
 */
__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr)
{
  uint32_t result;
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not accepted by
     the assembler, so the following less efficient pattern has to be used.
   */
  __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint8_t) result);    /* Add explicit type cast here */
}

/**
  \brief   LDR Exclusive (16 bit)
  \details Executes an exclusive LDR instruction for 16 bit values.
  \param [in] addr  Pointer to data
  \return           value of type uint16_t at (*addr)
 */
__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr)
{
  uint32_t result;
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not accepted by
     the assembler, so the following less efficient pattern has to be used.
   */
  __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint16_t) result);    /* Add explicit type cast here */
}

/**
  \brief   LDR Exclusive (32 bit)
  \details Executes an exclusive LDR instruction for 32 bit values.
  \param [in] addr  Pointer to data
  \return           value of type uint32_t at (*addr)
 */
__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr)
{
  uint32_t result;
  __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
  return(result);
}

/**
  \brief   STR Exclusive (8 bit)
  \details Executes an exclusive STR instruction for 8 bit values.
  \param [in] value  Value to store
  \param [in] addr   Pointer to location
  \return          0 Function succeeded
  \return          1 Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
  uint32_t result;
  __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
  return(result);
}

/**
  \brief   STR Exclusive (16 bit)
  \details Executes an exclusive STR instruction for 16 bit values.
  \param [in] value  Value to store
  \param [in] addr   Pointer to location
  \return          0 Function succeeded
  \return          1 Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
  uint32_t result;
  __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
  return(result);
}

/**
  \brief   STR Exclusive (32 bit)
  \details Executes an exclusive STR instruction for 32 bit values.
  \param [in] value  Value to store
  \param [in] addr   Pointer to location
  \return          0 Function succeeded
  \return          1 Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
  uint32_t result;
  __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
  return(result);
}

/**
  \brief   Remove the exclusive lock
  \details Removes the exclusive lock which is created by LDREX.
 */
__STATIC_FORCEINLINE void __CLREX(void)
{
  __ASM volatile ("clrex" ::: "memory");
}
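
/*
  Usage sketch (illustrative, not part of the original header): the exclusive
  load/store pair is normally used in a retry loop to build an atomic
  read-modify-write sequence. 'atomic_increment' is a hypothetical helper:

      static void atomic_increment(volatile uint32_t *p)
      {
        uint32_t val;
        do {
          val = __LDREXW(p);              // load and set the exclusive monitor
          val++;
        } while (__STREXW(val, p) != 0U); // retry if the exclusive store failed
        __DMB();                          // order the update against later accesses
      }
*/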

/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in] ARG1  Value to be saturated
  \param [in] ARG2  Bit position to saturate to (1..32)
  \return           Saturated value
 */
#define __SSAT(ARG1, ARG2) \
__extension__ \
({                          \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })

/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in] ARG1  Value to be saturated
  \param [in] ARG2  Bit position to saturate to (0..31)
  \return           Saturated value
 */
#define __USAT(ARG1, ARG2) \
__extension__ \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })
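
/*
  Worked example (illustrative): __SSAT(val, 8) clamps 'val' into the signed
  8-bit range [-128, 127] and __USAT(val, 8) clamps it into the unsigned range
  [0, 255]; for instance __SSAT(300, 8) yields 127 and __USAT(-5, 8) yields 0.
  The bit-position argument must be a compile-time constant because it is
  encoded as an immediate in the instruction (the "I" constraint above).
*/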

/* ###########################  Core Function Access  ########################### */
/** \ingroup  CMSIS_Core_FunctionInterface
    \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
  @{
 */

/**
  \brief   Enable IRQ Interrupts
  \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __enable_irq(void)
{
  __ASM volatile ("cpsie i" : : : "memory");
}

/**
  \brief   Disable IRQ Interrupts
  \details Disables IRQ interrupts by setting the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __disable_irq(void)
{
  __ASM volatile ("cpsid i" : : : "memory");
}
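
/*
  Usage sketch (illustrative, not part of the original header): a minimal
  critical section. This simple form assumes IRQs were enabled on entry;
  nesting-safe code would save the CPSR I-bit (see __get_CPSR below) and
  restore it instead of unconditionally re-enabling:

      __disable_irq();
      // ... access data shared with interrupt handlers ...
      __enable_irq();
*/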

/**
  \brief   Enable FIQ
  \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __enable_fault_irq(void)
{
  __ASM volatile ("cpsie f" : : : "memory");
}

/**
  \brief   Disable FIQ
  \details Disables FIQ interrupts by setting the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __disable_fault_irq(void)
{
  __ASM volatile ("cpsid f" : : : "memory");
}

/**
  \brief   Get FPSCR
  \details Returns the current value of the Floating Point Status/Control register.
  \return  Floating Point Status/Control register value
 */
__STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED   == 1U))     )
#if __has_builtin(__builtin_arm_get_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
  /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
  return __builtin_arm_get_fpscr();
#else
  uint32_t result;
  __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
  return(result);
#endif
#else
  return(0U);
#endif
}

/**
  \brief   Set FPSCR
  \details Assigns the given value to the Floating Point Status/Control register.
  \param [in] fpscr  Floating Point Status/Control value to set
 */
__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED   == 1U))     )
#if __has_builtin(__builtin_arm_set_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
  /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
  __builtin_arm_set_fpscr(fpscr);
#else
  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory");
#endif
#else
  (void)fpscr;
#endif
}

/*@} end of CMSIS_Core_RegAccFunctions */

/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/

__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;
  __ASM ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;
#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif
  return(llr.w64);
}

__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;
#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif
  return(llr.w64);
}

__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2)
{
  int32_t result;
  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2)
{
  int32_t result;
  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

#define __PKHBT(ARG1,ARG2,ARG3)  ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) | \
                                   ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )
#define __PKHTB(ARG1,ARG2,ARG3)  ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) | \
                                   ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )
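
/*
  Worked example (illustrative): __PKHBT keeps the bottom halfword of its first
  argument and takes the top halfword from the (left-shifted) second argument,
  e.g. __PKHBT(0x00001111U, 0x00002222U, 16) == 0x22221111U. __PKHTB is the
  mirror operation, keeping the top halfword of the first argument and the
  bottom halfword from the (right-shifted) second argument.
*/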

__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;
  __ASM ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
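
/*
  Usage sketch (illustrative, not part of the original header): __SMLAD is a
  dual signed 16x16 multiply-accumulate, which maps naturally onto fixed-point
  dot products over packed int16 pairs. 'dot_q15', 'a', 'b' and 'n' are
  assumptions for the example; n is assumed to be even:

      static int32_t dot_q15(const int16_t *a, const int16_t *b, uint32_t n)
      {
        uint32_t acc = 0U;
        for (uint32_t i = 0U; i < n; i += 2U)
        {
          uint32_t pa = __UNALIGNED_UINT32_READ(&a[i]);  // two packed a samples
          uint32_t pb = __UNALIGNED_UINT32_READ(&b[i]);  // two packed b samples
          acc = __SMLAD(pa, pb, acc);                    // acc += a[i]*b[i] + a[i+1]*b[i+1]
        }
        return (int32_t)acc;
      }
*/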

/*@} end of group CMSIS_SIMD_intrinsics */

/** \defgroup CMSIS_Core_intrinsics CMSIS Core Intrinsics
  Access to dedicated core register and coprocessor instructions
  @{
*/

/** \brief  Get CPSR Register
    \return CPSR Register value
 */
__STATIC_FORCEINLINE uint32_t __get_CPSR(void)
{
  uint32_t result;
  __ASM volatile("MRS %0, cpsr" : "=r" (result) );
  return(result);
}

/** \brief  Set CPSR Register
    \param [in] cpsr  CPSR value to set
 */
__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
{
  __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory");
}

/** \brief  Get Mode
    \return Processor Mode
 */
__STATIC_FORCEINLINE uint32_t __get_mode(void)
{
  return (__get_CPSR() & 0x1FU);
}

/** \brief  Set Mode
    \param [in] mode  Mode value to set
 */
__STATIC_FORCEINLINE void __set_mode(uint32_t mode)
{
  __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory");
}

/** \brief  Get Stack Pointer
    \return Stack Pointer value
 */
__STATIC_FORCEINLINE uint32_t __get_SP(void)
{
  uint32_t result;
  __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory");
  return result;
}

/** \brief  Set Stack Pointer
    \param [in] stack  Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __set_SP(uint32_t stack)
{
  __ASM volatile("MOV sp, %0" : : "r" (stack) : "memory");
}

/** \brief  Get USR/SYS Stack Pointer
    \return USR/SYS Stack Pointer value
 */
__STATIC_FORCEINLINE uint32_t __get_SP_usr(void)
{
  uint32_t cpsr = __get_CPSR();
  uint32_t result;
  __ASM volatile(
    "CPS #0x1F \n"
    "MOV %0, sp " : "=r"(result) : : "memory"
  );
  __set_CPSR(cpsr);
  __ISB();
  return result;
}

/** \brief  Set USR/SYS Stack Pointer
    \param [in] topOfProcStack  USR/SYS Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack)
{
  uint32_t cpsr = __get_CPSR();
  __ASM volatile(
    "CPS #0x1F \n"
    "MOV sp, %0 " : : "r" (topOfProcStack) : "memory"
  );
  __set_CPSR(cpsr);
  __ISB();
}

/** \brief  Get FPEXC
    \return Floating Point Exception Control register value
 */
__STATIC_FORCEINLINE uint32_t __get_FPEXC(void)
{
#if (__FPU_PRESENT == 1)
  uint32_t result;
  __ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory");
  return(result);
#else
  return(0);
#endif
}

/** \brief  Set FPEXC
    \param [in] fpexc  Floating Point Exception Control value to set
 */
__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)
{
#if (__FPU_PRESENT == 1)
  __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory");
#endif
}

/*
 * Include common core functions to access Coprocessor 15 registers
 */
#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
#define __get_CP64(cp, op1, Rt, CRm)         __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm)         __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )
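
/*
  Usage sketch (illustrative, not part of the original header): the coprocessor
  access macros take the MRC/MCR operands directly. For example, reading the
  Multiprocessor Affinity Register (MPIDR, CP15 c0, c0, opc2 = 5) to identify
  the current core:

      uint32_t mpidr;
      __get_CP(15, 0, mpidr, 0, 0, 5);       // MRC p15, 0, <Rt>, c0, c0, 5
      uint32_t core = mpidr & 0xFFU;         // Aff0 affinity field
*/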

#include "cmsis_cp15.h"

/** \brief  Enable Floating Point Unit

  Critical section, called from undef handler, so systick is disabled
 */
__STATIC_INLINE void __FPU_Enable(void)
{
  __ASM volatile(
    //Permit access to VFP/NEON registers by modifying CPACR
    " MRC p15,0,R1,c1,c0,2 \n"
    " ORR R1,R1,#0x00F00000 \n"
    " MCR p15,0,R1,c1,c0,2 \n"
    //Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
    " ISB \n"
    //Enable VFP/NEON
    " VMRS R1,FPEXC \n"
    " ORR R1,R1,#0x40000000 \n"
    " VMSR FPEXC,R1 \n"
    //Initialise VFP/NEON registers to 0
    " MOV R2,#0 \n"
    //Initialise the first 16 D registers (D0-D15) to 0
    " VMOV D0, R2,R2 \n"
    " VMOV D1, R2,R2 \n"
    " VMOV D2, R2,R2 \n"
    " VMOV D3, R2,R2 \n"
    " VMOV D4, R2,R2 \n"
    " VMOV D5, R2,R2 \n"
    " VMOV D6, R2,R2 \n"
    " VMOV D7, R2,R2 \n"
    " VMOV D8, R2,R2 \n"
    " VMOV D9, R2,R2 \n"
    " VMOV D10,R2,R2 \n"
    " VMOV D11,R2,R2 \n"
    " VMOV D12,R2,R2 \n"
    " VMOV D13,R2,R2 \n"
    " VMOV D14,R2,R2 \n"
    " VMOV D15,R2,R2 \n"
#if (defined(__ARM_NEON) && (__ARM_NEON == 1))
    //Initialise the remaining D registers (D16-D31) to 0
    " VMOV D16,R2,R2 \n"
    " VMOV D17,R2,R2 \n"
    " VMOV D18,R2,R2 \n"
    " VMOV D19,R2,R2 \n"
    " VMOV D20,R2,R2 \n"
    " VMOV D21,R2,R2 \n"
    " VMOV D22,R2,R2 \n"
    " VMOV D23,R2,R2 \n"
    " VMOV D24,R2,R2 \n"
    " VMOV D25,R2,R2 \n"
    " VMOV D26,R2,R2 \n"
    " VMOV D27,R2,R2 \n"
    " VMOV D28,R2,R2 \n"
    " VMOV D29,R2,R2 \n"
    " VMOV D30,R2,R2 \n"
    " VMOV D31,R2,R2 \n"
#endif
    //Initialise FPSCR to a known state
    " VMRS R1,FPSCR \n"
    " LDR R2,=0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
    " AND R1,R1,R2 \n"
    " VMSR FPSCR,R1 "
    : : : "cc", "r1", "r2"
  );
}

/*@} end of group CMSIS_Core_intrinsics */

#pragma GCC diagnostic pop

#endif /* __CMSIS_GCC_H */