/* interrupt.c — RISC-V PLIC interrupt management, spinlocks and IPIs for RT-Thread */
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2018/10/01 Bernard The first version
  9. * 2018/12/27 Jesven Change irq enable/disable to cpu0
  10. */
  11. #include <plic.h>
  12. #include <mmu.h>
  13. #include "tick.h"
  14. #include "encoding.h"
  15. #include "riscv.h"
  16. #include "interrupt.h"
  17. struct rt_irq_desc irq_desc[MAX_HANDLERS];
  18. #ifdef RT_USING_SMP
  19. #include "sbi.h"
  20. struct rt_irq_desc ipi_desc[RT_MAX_IPI];
  21. uint8_t ipi_vectors[RT_CPUS_NR] = { 0 };
  22. #endif /* RT_USING_SMP */
  23. static rt_isr_handler_t rt_hw_interrupt_handle(rt_uint32_t vector, void *param)
  24. {
  25. rt_kprintf("UN-handled interrupt %d occurred!!!\n", vector);
  26. return RT_NULL;
  27. }
  28. int rt_hw_plic_irq_enable(int irq_number)
  29. {
  30. plic_irq_enable(irq_number);
  31. return 0;
  32. }
  33. int rt_hw_plic_irq_disable(int irq_number)
  34. {
  35. plic_irq_disable(irq_number);
  36. return 0;
  37. }
/**
 * This function will un-mask an interrupt.
 * @param vector the interrupt number
 */
void rt_hw_interrupt_umask(int vector)
{
    /* A PLIC source with priority 0 never interrupts; give it the lowest
     * usable priority before enabling it. */
    plic_set_priority(vector, 1);
    rt_hw_plic_irq_enable(vector);
}
  47. /**
  48. * This function will install a interrupt service routine to a interrupt.
  49. * @param vector the interrupt number
  50. * @param new_handler the interrupt service routine to be installed
  51. * @param old_handler the old interrupt service routine
  52. */
  53. rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
  54. void *param, const char *name)
  55. {
  56. rt_isr_handler_t old_handler = RT_NULL;
  57. if (vector < MAX_HANDLERS)
  58. {
  59. old_handler = irq_desc[vector].handler;
  60. if (handler != RT_NULL)
  61. {
  62. irq_desc[vector].handler = (rt_isr_handler_t)handler;
  63. irq_desc[vector].param = param;
  64. #ifdef RT_USING_INTERRUPT_INFO
  65. rt_snprintf(irq_desc[vector].name, RT_NAME_MAX - 1, "%s", name);
  66. irq_desc[vector].counter = 0;
  67. #endif
  68. }
  69. }
  70. return old_handler;
  71. }
  72. void rt_hw_interrupt_init()
  73. {
  74. /* Enable machine external interrupts. */
  75. // set_csr(sie, SIP_SEIP);
  76. int idx = 0;
  77. /* init exceptions table */
  78. for (idx = 0; idx < MAX_HANDLERS; idx++)
  79. {
  80. irq_desc[idx].handler = (rt_isr_handler_t)rt_hw_interrupt_handle;
  81. irq_desc[idx].param = RT_NULL;
  82. #ifdef RT_USING_INTERRUPT_INFO
  83. rt_snprintf(irq_desc[idx].name, RT_NAME_MAX - 1, "default");
  84. irq_desc[idx].counter = 0;
  85. #endif
  86. }
  87. plic_set_threshold(0);
  88. }
  89. #ifdef RT_USING_SMP
/**
 * Program the PLIC priority of an interrupt source.
 * @param vector   the interrupt number
 * @param priority PLIC priority to assign (0 keeps the source from ever
 *                 interrupting — see rt_hw_interrupt_umask(), which uses 1)
 */
void rt_hw_interrupt_set_priority(int vector, unsigned int priority)
{
    plic_set_priority(vector, priority);
}
  94. unsigned int rt_hw_interrupt_get_priority(int vector)
  95. {
  96. return (*(uint32_t *)PLIC_PRIORITY(vector));
  97. }
/**
 * Report whether interrupts are globally disabled on the current hart.
 * @return RT_TRUE when sstatus.SIE is clear (interrupts masked)
 */
rt_bool_t rt_hw_interrupt_is_disabled(void)
{
    /* Determine the interrupt enable state */
    rt_ubase_t sstatus;
    __asm__ volatile("csrr %0, sstatus" : "=r"(sstatus));
    /* SIE clear means supervisor interrupts are disabled. */
    return (sstatus & SSTATUS_SIE) == 0;
}
  105. void rt_hw_spin_lock_init(rt_hw_spinlock_t *_lock)
  106. {
  107. _lock->slock = 0;
  108. }
/**
 * Acquire a ticket spinlock (FIFO-fair): take the next ticket, then spin
 * until the owner field reaches that ticket.
 *
 * @param lock spinlock to acquire; must have been initialized with
 *             rt_hw_spin_lock_init()
 */
void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
{
    /* Use ticket lock implemented on top of the 32/64-bit atomic AMO ops.
     * The combined word layout (slock) maps two uint16_t fields:
     * low 16 bits: owner
     * high 16 bits: next (ticket allocator)
     * We atomically increment the "next" field by (1 << 16) and use the
     * returned old value to compute our ticket. Then wait until owner == ticket.
     *
     * NOTE(review): the casts assume an rt_atomic_t access at &lock->slock
     * covers exactly the slock word; if rt_atomic_t is wider than slock this
     * would touch adjacent memory — confirm rt_hw_spinlock_t's definition.
     */
    rt_atomic_t prev;
    rt_atomic_t ticket;
    rt_atomic_t owner;
    /* Allocate a ticket by adding (1 << 16) to slock, prev holds previous value */
    prev = rt_hw_atomic_add((volatile rt_atomic_t *)&lock->slock, (rt_atomic_t)(1UL << 16));
    ticket = (prev >> 16) & 0xffffUL;
    /* Wait until owner equals our ticket (busy-wait; no backoff). */
    for (;;)
    {
        owner = rt_hw_atomic_load((volatile rt_atomic_t *)&lock->slock) & 0xffffUL;
        if (owner == ticket)
            break;
        /* TODO: low-power wait for interrupt while spinning */
    }
    /* Ensure all following memory accesses are ordered after acquiring the lock */
    __asm__ volatile("fence rw, rw" ::: "memory");
}
/**
 * Release a ticket spinlock by advancing the owner field, handing the lock
 * to the next waiting ticket.
 *
 * @param lock spinlock currently held by the caller
 */
void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
{
    /* Ensure memory operations before unlock are visible before owner increment */
    __asm__ volatile("fence rw, rw" ::: "memory");
    /* Increment owner (low 16 bits) to hand over lock to next ticket.
     * Use an atomic load of the combined slock word and compare the low
     * 16-bit owner field. If owner would overflow (0xffff), clear the owner
     * field atomically by ANDing with 0xffff0000 (0xffff + 1 wraps to 0 in
     * 16 bits, and a plain +1 would carry into the ticket field); otherwise
     * increment owner by 1.
     *
     * The load-then-modify pair is not one atomic step, but only the lock
     * holder ever writes the owner field, so owner cannot change between
     * the load and the AND/ADD; concurrent ticket allocations only touch
     * the high half, which both AMOs preserve. */
    if ((rt_hw_atomic_load((volatile rt_atomic_t *)&lock->slock) & (rt_atomic_t)0xffffUL) == (rt_atomic_t)0xffffUL)
    {
        /* Atomic clear owner (low 16 bits) when it overflows. Keep next ticket field. */
        rt_hw_atomic_and((volatile rt_atomic_t *)&lock->slock, (rt_atomic_t)0xffff0000UL);
    }
    else
    {
        rt_hw_atomic_add((volatile rt_atomic_t *)&lock->slock, (rt_atomic_t)1);
    }
    // TODO: IPI interrupt to wake up other harts waiting for the lock
    /* Make the increment visible to other harts */
    __asm__ volatile("fence rw, rw" ::: "memory");
}
  157. void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask)
  158. {
  159. int cpuid = __builtin_ctz(cpu_mask); // get the bit position of the lowest set bit
  160. ipi_vectors[cpuid] |= (uint8_t)ipi_vector;
  161. sbi_send_ipi((const unsigned long *)&cpu_mask);
  162. }
  163. void rt_hw_ipi_init(void)
  164. {
  165. int idx = 0, cpuid = rt_cpu_get_id();
  166. ipi_vectors[cpuid] = 0;
  167. /* init exceptions table */
  168. for (idx = 0; idx < RT_MAX_IPI; idx++)
  169. {
  170. ipi_desc[idx].handler = RT_NULL;
  171. ipi_desc[idx].param = RT_NULL;
  172. #ifdef RT_USING_INTERRUPT_INFO
  173. rt_snprintf(ipi_desc[idx].name, RT_NAME_MAX - 1, "default");
  174. ipi_desc[idx].counter = 0;
  175. #endif
  176. }
  177. set_csr(sie, SIP_SSIP);
  178. }
  179. void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler)
  180. {
  181. if (ipi_vector < RT_MAX_IPI)
  182. {
  183. if (ipi_isr_handler != RT_NULL)
  184. {
  185. ipi_desc[ipi_vector].handler = (rt_isr_handler_t)ipi_isr_handler;
  186. ipi_desc[ipi_vector].param = RT_NULL;
  187. }
  188. }
  189. }
/**
 * Dispatch all IPIs pending on the current hart: snapshot this hart's
 * pending bitmask, then for each set bit atomically clear the flag and
 * invoke the installed handler.
 */
void rt_hw_ipi_handler(void)
{
    rt_uint32_t ipi_vector;
    /* Snapshot of the pending mask; bits set after this read are handled
     * on the next software interrupt. */
    ipi_vector = ipi_vectors[rt_cpu_get_id()];
    while (ipi_vector)
    {
        int bitpos = __builtin_ctz(ipi_vector); /* lowest pending vector */
        ipi_vector &= ~(1 << bitpos);           /* drop it from the snapshot */
        if (bitpos < RT_MAX_IPI && ipi_desc[bitpos].handler != RT_NULL)
        {
            /* Clear the pending flag before running the handler so a new
             * posting of the same vector is not lost.
             * NOTE(review): this casts a uint8_t element to rt_atomic_t* —
             * the AMO will access more than one byte; confirm the access
             * width is safe for the ipi_vectors[] layout. */
            rt_hw_atomic_and((volatile rt_atomic_t *)&ipi_vectors[rt_cpu_get_id()], ~((rt_atomic_t)(1 << bitpos)));
            /* call the irq service routine */
            ipi_desc[bitpos].handler(bitpos, ipi_desc[bitpos].param);
        }
    }
    // TODO: Clear the software interrupt pending bit in CLINT
    /* Acknowledge the supervisor software interrupt. */
    clear_csr(sip, SIP_SSIP);
}
  208. #endif /* RT_USING_SMP */