  1. /*
  2. * Copyright (c) 2006-2022, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2022-11-28 WangXiaoyao the first version
  9. */
  10. #ifndef __TLB_H__
  11. #define __TLB_H__
  12. #include <rtthread.h>
  13. #include <stddef.h>
  14. #include <stdint.h>
  15. #include "mm_aspace.h"
  16. #include "mmu.h"
  17. #define TLBI_ARG(addr, asid) \
  18. ({ \
  19. rt_ubase_t arg = (rt_ubase_t)(addr) >> ARCH_PAGE_SHIFT; \
  20. arg &= (1ull << 44) - 1; \
  21. arg |= (rt_ubase_t)(asid) << MMU_ASID_SHIFT; \
  22. (void *)arg; \
  23. })
/**
 * @brief Invalidate all stage-1 EL1&0 TLB entries on every core in the
 *        Inner Shareable domain (broadcast TLBI).
 *
 * Barrier pairing: `dsb ishst` makes prior PTE stores visible before the
 * TLBI; `dsb ish` waits for the broadcast invalidation to complete on all
 * PEs in the domain; `isb` flushes the local pipeline so subsequent
 * instruction fetches use the new translations.
 */
static inline void rt_hw_tlb_invalidate_all(void)
{
    __asm__ volatile(
        // ensure updates to pte completed
        "dsb ishst\n"
        "tlbi vmalle1is\n"
        "dsb ish\n"
        // after tlb in new context, refresh inst
        "isb\n" ::
        : "memory");
}
  35. static inline void rt_hw_tlb_invalidate_all_local(void)
  36. {
  37. __asm__ volatile(
  38. // ensure updates to pte completed
  39. "dsb nshst\n"
  40. "tlbi vmalle1is\n"
  41. "dsb nsh\n"
  42. // after tlb in new context, refresh inst
  43. "isb\n" ::
  44. : "memory");
  45. }
  46. static inline void rt_hw_tlb_invalidate_aspace(rt_aspace_t aspace)
  47. {
  48. #ifdef ARCH_USING_ASID
  49. __asm__ volatile(
  50. // ensure updates to pte completed
  51. "dsb nshst\n"
  52. "tlbi aside1is, %0\n"
  53. "dsb nsh\n"
  54. // after tlb in new context, refresh inst
  55. "isb\n" ::"r"(TLBI_ARG(0ul, aspace->asid))
  56. : "memory");
  57. #else
  58. rt_hw_tlb_invalidate_all();
  59. #endif
  60. }
/**
 * @brief Invalidate the TLB entry for one page, broadcast to the Inner
 *        Shareable domain.
 *
 * Uses `tlbi vaae1is` (by VA, all ASIDs), so @p aspace is currently
 * unused and the page is invalidated for every address space; the
 * operand is built with ASID 0 via TLBI_ARG.
 *
 * @param aspace address space hint (unused by this implementation)
 * @param start  virtual address inside the page to invalidate
 */
static inline void rt_hw_tlb_invalidate_page(rt_aspace_t aspace, void *start)
{
    start = TLBI_ARG(start, 0);
    __asm__ volatile(
        // ensure updates to pte completed before the invalidation
        "dsb ishst\n"
        "tlbi vaae1is, %0\n"
        "dsb ish\n"
        "isb\n" ::"r"(start)
        : "memory");
}
  71. static inline void rt_hw_tlb_invalidate_range(rt_aspace_t aspace, void *start,
  72. size_t size, size_t stride)
  73. {
  74. if (size <= ARCH_PAGE_SIZE)
  75. {
  76. rt_hw_tlb_invalidate_page(aspace, start);
  77. }
  78. else
  79. {
  80. rt_hw_tlb_invalidate_aspace(aspace);
  81. }
  82. }
  83. #endif /* __TLB_H__ */