/*
 * xtensa/core-macros.h -- C specific definitions
 *                         that depend on CORE configuration
 */

/*
 * Copyright (c) 2012 Tensilica Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
  27. #ifndef XTENSA_CACHE_H
  28. #define XTENSA_CACHE_H
  29. #include <xtensa/config/core.h>
  30. /* Only define things for C code. */
  31. #if !defined(_ASMLANGUAGE) && !defined(_NOCLANGUAGE) && !defined(__ASSEMBLER__)
  32. /*************************** CACHE ***************************/
  33. /* All the macros are in the lower case now and some of them
  34. * share the name with the existing functions from hal.h.
  35. * Including this header file will define XTHAL_USE_CACHE_MACROS
  36. * which directs hal.h not to use the functions.
  37. */
  38. /*
  39. * Single-cache-line operations in C-callable inline assembly.
  40. * Essentially macro versions (uppercase) of:
  41. *
  42. * xthal_icache_line_invalidate(void *addr);
  43. * xthal_icache_line_lock(void *addr);
  44. * xthal_icache_line_unlock(void *addr);
  45. * xthal_icache_sync(void);
  46. *
  47. * NOTE: unlike the above functions, the following macros do NOT
  48. * execute the xthal_icache_sync() as part of each line operation.
  49. * This sync must be called explicitly by the caller. This is to
  50. * allow better optimization when operating on more than one line.
  51. *
  52. * xthal_dcache_line_invalidate(void *addr);
  53. * xthal_dcache_line_writeback(void *addr);
  54. * xthal_dcache_line_writeback_inv(void *addr);
  55. * xthal_dcache_line_lock(void *addr);
  56. * xthal_dcache_line_unlock(void *addr);
  57. * xthal_dcache_sync(void);
  58. * xthal_dcache_line_prefetch_for_write(void *addr);
  59. * xthal_dcache_line_prefetch_for_read(void *addr);
  60. *
  61. * All are made memory-barriers, given that's how they're typically used
  62. * (ops operate on a whole line, so clobbers all memory not just *addr).
  63. *
  64. * NOTE: All the block block cache ops and line prefetches are implemented
  65. * using intrinsics so they are better optimized regarding memory barriers etc.
  66. *
  67. * All block downgrade functions exist in two forms: with and without
  68. * the 'max' parameter: This parameter allows compiler to optimize
  69. * the functions whenever the parameter is smaller than the cache size.
  70. *
  71. * xthal_dcache_block_invalidate(void *addr, unsigned size);
  72. * xthal_dcache_block_writeback(void *addr, unsigned size);
  73. * xthal_dcache_block_writeback_inv(void *addr, unsigned size);
  74. * xthal_dcache_block_invalidate_max(void *addr, unsigned size, unsigned max);
  75. * xthal_dcache_block_writeback_max(void *addr, unsigned size, unsigned max);
  76. * xthal_dcache_block_writeback_inv_max(void *addr, unsigned size, unsigned max);
  77. *
  78. * xthal_dcache_block_prefetch_for_read(void *addr, unsigned size);
  79. * xthal_dcache_block_prefetch_for_write(void *addr, unsigned size);
  80. * xthal_dcache_block_prefetch_modify(void *addr, unsigned size);
  81. * xthal_dcache_block_prefetch_read_write(void *addr, unsigned size);
  82. * xthal_dcache_block_prefetch_for_read_grp(void *addr, unsigned size);
  83. * xthal_dcache_block_prefetch_for_write_grp(void *addr, unsigned size);
  84. * xthal_dcache_block_prefetch_modify_grp(void *addr, unsigned size);
  85. * xthal_dcache_block_prefetch_read_write_grp(void *addr, unsigned size)
  86. *
  87. * xthal_dcache_block_wait();
  88. * xthal_dcache_block_required_wait();
  89. * xthal_dcache_block_abort();
  90. * xthal_dcache_block_prefetch_end();
  91. * xthal_dcache_block_newgrp();
  92. */
  93. /*** INSTRUCTION CACHE ***/
  94. #define XTHAL_USE_CACHE_MACROS
  95. #if XCHAL_ICACHE_SIZE > 0
  96. # define xthal_icache_line_invalidate(addr) do { void *__a = (void*)(addr); \
  97. __asm__ __volatile__("ihi %0, 0" :: "a"(__a) : "memory"); \
  98. } while(0)
  99. #else
  100. # define xthal_icache_line_invalidate(addr) do {/*nothing*/} while(0)
  101. #endif
  102. #if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
  103. # define xthal_icache_line_lock(addr) do { void *__a = (void*)(addr); \
  104. __asm__ __volatile__("ipfl %0, 0" :: "a"(__a) : "memory"); \
  105. } while(0)
  106. # define xthal_icache_line_unlock(addr) do { void *__a = (void*)(addr); \
  107. __asm__ __volatile__("ihu %0, 0" :: "a"(__a) : "memory"); \
  108. } while(0)
  109. #else
  110. # define xthal_icache_line_lock(addr) do {/*nothing*/} while(0)
  111. # define xthal_icache_line_unlock(addr) do {/*nothing*/} while(0)
  112. #endif
  113. /*
  114. * Even if a config doesn't have caches, an isync is still needed
  115. * when instructions in any memory are modified, whether by a loader
  116. * or self-modifying code. Therefore, this macro always produces
  117. * an isync, whether or not an icache is present.
  118. */
  119. #define xthal_icache_sync() \
  120. __asm__ __volatile__("isync":::"memory")
  121. /*** DATA CACHE ***/
  122. #if XCHAL_DCACHE_SIZE > 0
  123. # include <xtensa/tie/xt_datacache.h>
  124. # define xthal_dcache_line_invalidate(addr) do { void *__a = (void*)(addr); \
  125. __asm__ __volatile__("dhi %0, 0" :: "a"(__a) : "memory"); \
  126. } while(0)
  127. # define xthal_dcache_line_writeback(addr) do { void *__a = (void*)(addr); \
  128. __asm__ __volatile__("dhwb %0, 0" :: "a"(__a) : "memory"); \
  129. } while(0)
  130. # define xthal_dcache_line_writeback_inv(addr) do { void *__a = (void*)(addr); \
  131. __asm__ __volatile__("dhwbi %0, 0" :: "a"(__a) : "memory"); \
  132. } while(0)
  133. # define xthal_dcache_sync() \
  134. __asm__ __volatile__("" /*"dsync"?*/:::"memory")
  135. # define xthal_dcache_line_prefetch_for_read(addr) do { \
  136. XT_DPFR((const int*)addr, 0); \
  137. } while(0)
  138. #else
  139. # define xthal_dcache_line_invalidate(addr) do {/*nothing*/} while(0)
  140. # define xthal_dcache_line_writeback(addr) do {/*nothing*/} while(0)
  141. # define xthal_dcache_line_writeback_inv(addr) do {/*nothing*/} while(0)
  142. # define xthal_dcache_sync() __asm__ __volatile__("":::"memory")
  143. # define xthal_dcache_line_prefetch_for_read(addr) do {/*nothing*/} while(0)
  144. #endif
  145. #if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
  146. # define xthal_dcache_line_lock(addr) do { void *__a = (void*)(addr); \
  147. __asm__ __volatile__("dpfl %0, 0" :: "a"(__a) : "memory"); \
  148. } while(0)
  149. # define xthal_dcache_line_unlock(addr) do { void *__a = (void*)(addr); \
  150. __asm__ __volatile__("dhu %0, 0" :: "a"(__a) : "memory"); \
  151. } while(0)
  152. #else
  153. # define xthal_dcache_line_lock(addr) do {/*nothing*/} while(0)
  154. # define xthal_dcache_line_unlock(addr) do {/*nothing*/} while(0)
  155. #endif
  156. #if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
  157. # define xthal_dcache_line_prefetch_for_write(addr) do { \
  158. XT_DPFW((const int*)addr, 0); \
  159. } while(0)
  160. #else
  161. # define xthal_dcache_line_prefetch_for_write(addr) do {/*nothing*/} while(0)
  162. #endif
  163. /***** Block Operations *****/
  164. #if XCHAL_DCACHE_SIZE > 0 && XCHAL_HAVE_CACHE_BLOCKOPS
  165. /* upgrades */
  166. # define _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, type) \
  167. { \
  168. type((const int*)addr, size); \
  169. }
  170. /*downgrades */
  171. # define _XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, type) \
  172. unsigned _s = size; \
  173. unsigned _a = addr; \
  174. do { \
  175. unsigned __s = (_s > XCHAL_DCACHE_SIZE) ? \
  176. XCHAL_DCACHE_SIZE : _s; \
  177. type((const int*)_a, __s); \
  178. _s -= __s; \
  179. _a += __s; \
  180. } while(_s > 0);
  181. # define _XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, type, max) \
  182. if (max <= XCHAL_DCACHE_SIZE) { \
  183. unsigned _s = size; \
  184. unsigned _a = addr; \
  185. type((const int*)_a, _s); \
  186. } \
  187. else { \
  188. _XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, type); \
  189. }
  190. # define xthal_dcache_block_invalidate(addr, size) do { \
  191. _XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, XT_DHI_B); \
  192. } while(0)
  193. # define xthal_dcache_block_writeback(addr, size) do { \
  194. _XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, XT_DHWB_B); \
  195. } while(0)
  196. # define xthal_dcache_block_writeback_inv(addr, size) do { \
  197. _XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, XT_DHWBI_B); \
  198. } while(0)
  199. # define xthal_dcache_block_invalidate_max(addr, size, max) do { \
  200. _XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, XT_DHI_B, max); \
  201. } while(0)
  202. # define xthal_dcache_block_writeback_max(addr, size, max) do { \
  203. _XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, XT_DHWB_B, max); \
  204. } while(0)
  205. # define xthal_dcache_block_writeback_inv_max(addr, size, max) do { \
  206. _XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, XT_DHWBI_B, max); \
  207. } while(0)
  208. /* upgrades that are performed even with write-thru caches */
  209. # define xthal_dcache_block_prefetch_read_write(addr, size) do { \
  210. _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_B); \
  211. } while(0)
  212. # define xthal_dcache_block_prefetch_read_write_grp(addr, size) do { \
  213. _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_BF); \
  214. } while(0)
  215. # define xthal_dcache_block_prefetch_for_read(addr, size) do { \
  216. _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFR_B); \
  217. } while(0)
  218. # define xthal_dcache_block_prefetch_for_read_grp(addr, size) do { \
  219. _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFR_BF); \
  220. } while(0)
  221. /* abort all or end optional block cache operations */
  222. # define xthal_dcache_block_abort() do { \
  223. XT_PFEND_A(); \
  224. } while(0)
  225. # define xthal_dcache_block_end() do { \
  226. XT_PFEND_O(); \
  227. } while(0)
  228. /* wait for all/required block cache operations to finish */
  229. # define xthal_dcache_block_wait() do { \
  230. XT_PFWAIT_A(); \
  231. } while(0)
  232. # define xthal_dcache_block_required_wait() do { \
  233. XT_PFWAIT_R(); \
  234. } while(0)
  235. /* Start a new group */
  236. # define xthal_dcache_block_newgrp() do { \
  237. XT_PFNXT_F(); \
  238. } while(0)
  239. #else
  240. # define xthal_dcache_block_invalidate(addr, size) do {/*nothing*/} while(0)
  241. # define xthal_dcache_block_writeback(addr, size) do {/*nothing*/} while(0)
  242. # define xthal_dcache_block_writeback_inv(addr, size) do {/*nothing*/} while(0)
  243. # define xthal_dcache_block_invalidate_max(addr, size, max) do {/*nothing*/} while(0)
  244. # define xthal_dcache_block_writeback_max(addr, size, max) do {/*nothing*/} while(0)
  245. # define xthal_dcache_block_writeback_inv_max(addr, size, max) do {/*nothing*/} while(0)
  246. # define xthal_dcache_block_prefetch_read_write(addr, size) do {/*nothing*/} while(0)
  247. # define xthal_dcache_block_prefetch_read_write_grp(addr, size) do {/*nothing*/} while(0)
  248. # define xthal_dcache_block_prefetch_for_read(addr, size) do {/*nothing*/} while(0)
  249. # define xthal_dcache_block_prefetch_for_read_grp(addr, size) do {/*nothing*/} while(0)
  250. # define xthal_dcache_block_end() do {/*nothing*/} while(0)
  251. # define xthal_dcache_block_abort() do {/*nothing*/} while(0)
  252. # define xthal_dcache_block_wait() do {/*nothing*/} while(0)
  253. # define xthal_dcache_block_required_wait() do {/*nothing*/} while(0)
  254. # define xthal_dcache_block_newgrp() do {/*nothing*/} while(0)
  255. #endif
  256. #if XCHAL_DCACHE_SIZE > 0 && XCHAL_HAVE_CACHE_BLOCKOPS && XCHAL_DCACHE_IS_WRITEBACK
  257. # define xthal_dcache_block_prefetch_for_write(addr, size) do { \
  258. _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_B); \
  259. } while(0)
  260. # define xthal_dcache_block_prefetch_modify(addr, size) do { \
  261. _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFM_B); \
  262. } while(0)
  263. # define xthal_dcache_block_prefetch_for_write_grp(addr, size) do { \
  264. _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_BF); \
  265. } while(0)
  266. # define xthal_dcache_block_prefetch_modify_grp(addr, size) do { \
  267. _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFM_BF); \
  268. } while(0)
  269. #else
  270. # define xthal_dcache_block_prefetch_for_write(addr, size) do {/*nothing*/} while(0)
  271. # define xthal_dcache_block_prefetch_modify(addr, size) do {/*nothing*/} while(0)
  272. # define xthal_dcache_block_prefetch_for_write_grp(addr, size) do {/*nothing*/} while(0)
  273. # define xthal_dcache_block_prefetch_modify_grp(addr, size) do {/*nothing*/} while(0)
  274. #endif
  275. /*************************** INTERRUPTS ***************************/
  276. /*
  277. * Macro versions of:
  278. * unsigned xthal_get_intenable( void );
  279. * void xthal_set_intenable( unsigned );
  280. * unsigned xthal_get_interrupt( void );
  281. * void xthal_set_intset( unsigned );
  282. * void xthal_set_intclear( unsigned );
  283. * unsigned xthal_get_ccount(void);
  284. * void xthal_set_ccompare(int, unsigned);
  285. * unsigned xthal_get_ccompare(int);
  286. *
  287. * NOTE: for {set,get}_ccompare, the first argument MUST be a decimal constant.
  288. */
  289. #if XCHAL_HAVE_INTERRUPTS
  290. # define XTHAL_GET_INTENABLE() ({ int __intenable; \
  291. __asm__("rsr.intenable %0" : "=a"(__intenable)); \
  292. __intenable; })
  293. # define XTHAL_SET_INTENABLE(v) do { int __intenable = (int)(v); \
  294. __asm__ __volatile__("wsr.intenable %0" :: "a"(__intenable):"memory"); \
  295. } while(0)
  296. # define XTHAL_GET_INTERRUPT() ({ int __interrupt; \
  297. __asm__("rsr.interrupt %0" : "=a"(__interrupt)); \
  298. __interrupt; })
  299. # define XTHAL_SET_INTSET(v) do { int __interrupt = (int)(v); \
  300. __asm__ __volatile__("wsr.intset %0" :: "a"(__interrupt):"memory"); \
  301. } while(0)
  302. # define XTHAL_SET_INTCLEAR(v) do { int __interrupt = (int)(v); \
  303. __asm__ __volatile__("wsr.intclear %0" :: "a"(__interrupt):"memory"); \
  304. } while(0)
  305. # define XTHAL_GET_CCOUNT() ({ int __ccount; \
  306. __asm__("rsr.ccount %0" : "=a"(__ccount)); \
  307. __ccount; })
  308. # define XTHAL_SET_CCOUNT(v) do { int __ccount = (int)(v); \
  309. __asm__ __volatile__("wsr.ccount %0" :: "a"(__ccount):"memory"); \
  310. } while(0)
  311. # define _XTHAL_GET_CCOMPARE(n) ({ int __ccompare; \
  312. __asm__("rsr.ccompare" #n " %0" : "=a"(__ccompare)); \
  313. __ccompare; })
  314. # define XTHAL_GET_CCOMPARE(n) _XTHAL_GET_CCOMPARE(n)
  315. # define _XTHAL_SET_CCOMPARE(n,v) do { int __ccompare = (int)(v); \
  316. __asm__ __volatile__("wsr.ccompare" #n " %0 ; esync" :: "a"(__ccompare):"memory"); \
  317. } while(0)
  318. # define XTHAL_SET_CCOMPARE(n,v) _XTHAL_SET_CCOMPARE(n,v)
  319. #else
  320. # define XTHAL_GET_INTENABLE() 0
  321. # define XTHAL_SET_INTENABLE(v) do {/*nothing*/} while(0)
  322. # define XTHAL_GET_INTERRUPT() 0
  323. # define XTHAL_SET_INTSET(v) do {/*nothing*/} while(0)
  324. # define XTHAL_SET_INTCLEAR(v) do {/*nothing*/} while(0)
  325. # define XTHAL_GET_CCOUNT() 0
  326. # define XTHAL_SET_CCOUNT(v) do {/*nothing*/} while(0)
  327. # define XTHAL_GET_CCOMPARE(n) 0
  328. # define XTHAL_SET_CCOMPARE(n,v) do {/*nothing*/} while(0)
  329. #endif
  330. /*************************** MISC ***************************/
  331. /*
  332. * Macro or inline versions of:
  333. * void xthal_clear_regcached_code( void );
  334. * unsigned xthal_get_prid( void );
  335. * unsigned xthal_compare_and_set( int *addr, int testval, int setval );
  336. */
  337. #if XCHAL_HAVE_LOOPS
  338. # define XTHAL_CLEAR_REGCACHED_CODE() \
  339. __asm__ __volatile__("wsr.lcount %0" :: "a"(0) : "memory")
  340. #else
  341. # define XTHAL_CLEAR_REGCACHED_CODE() do {/*nothing*/} while(0)
  342. #endif
  343. #if XCHAL_HAVE_PRID
  344. # define XTHAL_GET_PRID() ({ int __prid; \
  345. __asm__("rsr.prid %0" : "=a"(__prid)); \
  346. __prid; })
  347. #else
  348. # define XTHAL_GET_PRID() 0
  349. #endif
  350. static inline unsigned XTHAL_COMPARE_AND_SET( int *addr, int testval, int setval )
  351. {
  352. int result;
  353. #if XCHAL_HAVE_S32C1I && XCHAL_HW_MIN_VERSION_MAJOR >= 2200
  354. __asm__ __volatile__ (
  355. " wsr.scompare1 %2 \n"
  356. " s32c1i %0, %3, 0 \n"
  357. : "=a"(result) : "0" (setval), "a" (testval), "a" (addr)
  358. : "memory");
  359. #elif XCHAL_HAVE_INTERRUPTS
  360. int tmp;
  361. __asm__ __volatile__ (
  362. " rsil %4, 15 \n" // %4 == saved ps
  363. " l32i %0, %3, 0 \n" // %0 == value to test, return val
  364. " bne %2, %0, 9f \n" // test
  365. " s32i %1, %3, 0 \n" // write the new value
  366. "9: wsr.ps %4 ; rsync \n" // restore the PS
  367. : "=a"(result)
  368. : "0" (setval), "a" (testval), "a" (addr), "a" (tmp)
  369. : "memory");
  370. #else
  371. __asm__ __volatile__ (
  372. " l32i %0, %3, 0 \n" // %0 == value to test, return val
  373. " bne %2, %0, 9f \n" // test
  374. " s32i %1, %3, 0 \n" // write the new value
  375. "9: \n"
  376. : "=a"(result) : "0" (setval), "a" (testval), "a" (addr)
  377. : "memory");
  378. #endif
  379. return result;
  380. }
  381. #if XCHAL_HAVE_EXTERN_REGS
  382. static inline unsigned XTHAL_RER (unsigned int reg)
  383. {
  384. unsigned result;
  385. __asm__ __volatile__ (
  386. " rer %0, %1"
  387. : "=a" (result) : "a" (reg) : "memory");
  388. return result;
  389. }
  390. static inline void XTHAL_WER (unsigned reg, unsigned value)
  391. {
  392. __asm__ __volatile__ (
  393. " wer %0, %1"
  394. : : "a" (value), "a" (reg) : "memory");
  395. }
  396. #endif /* XCHAL_HAVE_EXTERN_REGS */
  397. #endif /* C code */
  398. #endif /*XTENSA_CACHE_H*/