pic.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316
  1. /*
  2. * Copyright (c) 2006-2022, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2022-08-24 GuEe-GUI first version
  9. */
  10. #include <rthw.h>
  11. #include <rtthread.h>
  12. #define DBG_TAG "rtdm.pic"
  13. #define DBG_LVL DBG_INFO
  14. #include <rtdbg.h>
  15. #include <drivers/pic.h>
  16. #include <ktime.h>
/*
 * A registered interrupt "trap": a catch-all hook that may claim an
 * interrupt before/instead of normal PIC dispatch (see rt_pic_do_traps()).
 */
struct irq_traps
{
    rt_list_t list;                 /* node in the global _traps_nodes list */
    void *data;                     /* opaque argument passed to handler */
    rt_bool_t (*handler)(void *);   /* returns RT_TRUE if the trap handled the event */
};
/*
 * Logical IRQ numbers reserved for IPIs: when RT_USING_SMP is enabled the
 * first entries of _pirq_hash are indexed directly by the IPI id.
 */
static int _ipi_hash[] =
{
#ifdef RT_USING_SMP
    [RT_SCHEDULE_IPI] = RT_SCHEDULE_IPI,
    [RT_STOP_IPI] = RT_STOP_IPI,
#endif
};
/* reserved ipi */
/* Next free slot in _pirq_hash; slots below this index are already allocated. */
static int _pirq_hash_idx = RT_ARRAY_SIZE(_ipi_hash);
/*
 * Global table of IRQ descriptors, indexed by logical IRQ number.
 * irq/hwirq == -1 marks an unconfigured slot (see irq2pirq()).
 */
static struct rt_pic_irq _pirq_hash[MAX_HANDLERS] =
{
    [0 ... MAX_HANDLERS - 1] =
    {
        .irq = -1,
        .hwirq = -1,
        .mode = RT_IRQ_MODE_NONE,
        .priority = RT_UINT32_MAX,
        .rw_lock = { },
    }
};
/* Protects the PIC list and the _pirq_hash_idx allocator. */
static struct rt_spinlock _pic_lock = { };
/* Longest registered PIC name, used for column alignment in list_irq(). */
static rt_size_t _pic_name_max = sizeof("PIC");
static rt_list_t _pic_nodes = RT_LIST_OBJECT_INIT(_pic_nodes);     /* all registered PICs */
static rt_list_t _traps_nodes = RT_LIST_OBJECT_INIT(_traps_nodes); /* registered irq traps */
  47. static struct rt_pic_irq *irq2pirq(int irq)
  48. {
  49. struct rt_pic_irq *pirq = RT_NULL;
  50. if ((irq >= 0) && (irq < MAX_HANDLERS))
  51. {
  52. pirq = &_pirq_hash[irq];
  53. if (pirq->irq < 0)
  54. {
  55. pirq = RT_NULL;
  56. }
  57. }
  58. if (!pirq)
  59. {
  60. LOG_E("irq = %d is invalid", irq);
  61. }
  62. return pirq;
  63. }
  64. static void append_pic(struct rt_pic *pic)
  65. {
  66. int pic_name_len = rt_strlen(pic->ops->name);
  67. rt_list_insert_before(&_pic_nodes, &pic->list);
  68. if (pic_name_len > _pic_name_max)
  69. {
  70. _pic_name_max = pic_name_len;
  71. }
  72. }
  73. void rt_pic_default_name(struct rt_pic *pic)
  74. {
  75. if (pic)
  76. {
  77. #if RT_NAME_MAX > 0
  78. rt_strncpy(pic->parent.name, "PIC", RT_NAME_MAX - 1);
  79. pic->parent.name[RT_NAME_MAX - 1] = '\0';
  80. #else
  81. pic->parent.name = "PIC";
  82. #endif
  83. }
  84. }
/*
 * Try to interpret 'ptr' as a struct rt_pic.
 * The candidate may be a bare rt_pic (object type Unknown), or a wrapper
 * object (rt_device or other rt_object) with the PIC assumed to sit
 * immediately after the wrapper in memory.
 * Returns the PIC pointer only if the candidate's object name is "PIC",
 * otherwise RT_NULL.
 */
struct rt_pic *rt_pic_dynamic_cast(void *ptr)
{
    struct rt_pic *pic = RT_NULL, *tmp = RT_NULL;

    if (ptr)
    {
        struct rt_object *obj = ptr;

        if (obj->type == RT_Object_Class_Unknown)
        {
            /* Already a bare PIC object. */
            tmp = (void *)obj;
        }
        else if (obj->type == RT_Object_Class_Device)
        {
            /* PIC embedded right after the rt_device header. */
            tmp = (void *)obj + sizeof(struct rt_device);
        }
        else
        {
            /* Any other object type: assume PIC follows the rt_object header. */
            tmp = (void *)obj + sizeof(struct rt_object);
        }

        /* Only accept candidates stamped by rt_pic_default_name(). */
        if (tmp && !rt_strcmp(tmp->parent.name, "PIC"))
        {
            pic = tmp;
        }
    }

    return pic;
}
/*
 * Allocate a contiguous range of 'irq_nr' logical IRQs from _pirq_hash for
 * 'pic' and register the PIC on the global list.
 *
 * Returns RT_EOK on success, -RT_EEMPTY when the hash table cannot hold the
 * request, or -RT_EINVAL on bad arguments.
 */
rt_err_t rt_pic_linear_irq(struct rt_pic *pic, rt_size_t irq_nr)
{
    rt_err_t err = RT_EOK;

    if (pic && pic->ops && pic->ops->name)
    {
        /* _pirq_hash_idx and the PIC list are shared: take the global lock. */
        rt_ubase_t level = rt_spin_lock_irqsave(&_pic_lock);

        if (_pirq_hash_idx + irq_nr <= RT_ARRAY_SIZE(_pirq_hash))
        {
            rt_list_init(&pic->list);

            rt_pic_default_name(pic);
            pic->parent.type = RT_Object_Class_Unknown;

            /* Hand the PIC its slice of the descriptor table. */
            pic->irq_start = _pirq_hash_idx;
            pic->irq_nr = irq_nr;
            pic->pirqs = &_pirq_hash[_pirq_hash_idx];

            _pirq_hash_idx += irq_nr;

            append_pic(pic);

            LOG_D("%s alloc irqs ranges [%d, %d]", pic->ops->name,
                    pic->irq_start, pic->irq_start + pic->irq_nr);
        }
        else
        {
            LOG_E("%s alloc %d irqs is overflow", pic->ops->name, irq_nr);

            err = -RT_EEMPTY;
        }

        rt_spin_unlock_irqrestore(&_pic_lock, level);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
  142. rt_err_t rt_pic_cancel_irq(struct rt_pic *pic)
  143. {
  144. rt_err_t err = RT_EOK;
  145. if (pic && pic->pirqs)
  146. {
  147. rt_ubase_t level = rt_spin_lock_irqsave(&_pic_lock);
  148. /*
  149. * This is only to make system runtime safely,
  150. * we don't recommend PICs to unregister.
  151. */
  152. rt_list_remove(&pic->list);
  153. rt_spin_unlock_irqrestore(&_pic_lock, level);
  154. }
  155. else
  156. {
  157. err = -RT_EINVAL;
  158. }
  159. return err;
  160. }
/*
 * Bind descriptor 'pirq' to (pic, irq, hwirq) under its rw_lock.
 * A fresh slot (irq still -1) gets its list heads initialized; a slot being
 * taken over by a different PIC must not have leftover links (asserted).
 */
static void config_pirq(struct rt_pic *pic, struct rt_pic_irq *pirq, int irq, int hwirq)
{
    rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

    if (pirq->irq < 0)
    {
        /* First configuration of this slot. */
        rt_list_init(&pirq->list);
        rt_list_init(&pirq->children_nodes);
        rt_list_init(&pirq->isr.list);
    }
    else if (pirq->pic != pic)
    {
        /* Re-binding to another PIC: the slot must be fully unlinked. */
        RT_ASSERT(rt_list_isempty(&pirq->list) == RT_TRUE);
        RT_ASSERT(rt_list_isempty(&pirq->children_nodes) == RT_TRUE);
        RT_ASSERT(rt_list_isempty(&pirq->isr.list) == RT_TRUE);
    }

    pirq->irq = irq;
    pirq->hwirq = hwirq;
    pirq->pic = pic;

    rt_spin_unlock_irqrestore(&pirq->rw_lock, level);
}
  181. int rt_pic_config_ipi(struct rt_pic *pic, int ipi_index, int hwirq)
  182. {
  183. int ipi = ipi_index;
  184. struct rt_pic_irq *pirq;
  185. if (pic && ipi < RT_ARRAY_SIZE(_ipi_hash) && hwirq >= 0 && pic->ops->irq_send_ipi)
  186. {
  187. pirq = &_pirq_hash[ipi];
  188. config_pirq(pic, pirq, ipi, hwirq);
  189. for (int cpuid = 0; cpuid < RT_CPUS_NR; ++cpuid)
  190. {
  191. RT_IRQ_AFFINITY_SET(pirq->affinity, cpuid);
  192. }
  193. LOG_D("%s config %s %d to hwirq %d", pic->ops->name, "ipi", ipi, hwirq);
  194. }
  195. else
  196. {
  197. ipi = -RT_EINVAL;
  198. }
  199. return ipi;
  200. }
  201. int rt_pic_config_irq(struct rt_pic *pic, int irq_index, int hwirq)
  202. {
  203. int irq;
  204. if (pic && hwirq >= 0)
  205. {
  206. irq = pic->irq_start + irq_index;
  207. if (irq >= 0 && irq < MAX_HANDLERS)
  208. {
  209. config_pirq(pic, &_pirq_hash[irq], irq, hwirq);
  210. LOG_D("%s config %s %d to hwirq %d", pic->ops->name, "irq", irq, hwirq);
  211. }
  212. else
  213. {
  214. irq = -RT_ERROR;
  215. }
  216. }
  217. else
  218. {
  219. irq = -RT_EINVAL;
  220. }
  221. return irq;
  222. }
  223. struct rt_pic_irq *rt_pic_find_ipi(struct rt_pic *pic, int ipi_index)
  224. {
  225. struct rt_pic_irq *pirq = &_pirq_hash[ipi_index];
  226. RT_ASSERT(ipi_index < RT_ARRAY_SIZE(_ipi_hash));
  227. RT_ASSERT(pirq->pic == pic);
  228. return pirq;
  229. }
  230. struct rt_pic_irq *rt_pic_find_pirq(struct rt_pic *pic, int irq)
  231. {
  232. if (pic && irq >= pic->irq_start && irq <= pic->irq_start + pic->irq_nr)
  233. {
  234. return &pic->pirqs[irq - pic->irq_start];
  235. }
  236. return RT_NULL;
  237. }
/*
 * Make 'pirq' a child of the descriptor behind 'parent_irq' (IRQ cascading).
 * The child inherits the parent's priority and affinity; if the child's PIC
 * supports routing (RT_PIC_F_IRQ_ROUTING), the child is also linked into the
 * parent's children list so rt_pic_handle_isr() dispatches it.
 * Returns RT_EOK, or -RT_EINVAL on bad arguments / already-cascaded pirq.
 */
rt_err_t rt_pic_cascade(struct rt_pic_irq *pirq, int parent_irq)
{
    rt_err_t err = RT_EOK;

    if (pirq && !pirq->parent && parent_irq >= 0)
    {
        struct rt_pic_irq *parent;

        /* Child lock first, released before taking the parent lock below. */
        rt_spin_lock(&pirq->rw_lock);

        parent = irq2pirq(parent_irq);

        if (parent)
        {
            pirq->parent = parent;
            /* Inherit scheduling attributes from the parent line. */
            pirq->priority = parent->priority;
            rt_memcpy(&pirq->affinity, &parent->affinity, sizeof(pirq->affinity));
        }

        rt_spin_unlock(&pirq->rw_lock);

        if (parent && pirq->pic->ops->flags & RT_PIC_F_IRQ_ROUTING)
        {
            /* Link under the parent so its ISR walk reaches this child. */
            rt_spin_lock(&parent->rw_lock);
            rt_list_insert_before(&parent->children_nodes, &pirq->list);
            rt_spin_unlock(&parent->rw_lock);
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
/*
 * Undo rt_pic_cascade(): detach 'pirq' from its parent and, when the child's
 * PIC uses IRQ routing, unlink it from the parent's children list.
 * Returns RT_EOK, or -RT_EINVAL if the pirq has no parent.
 */
rt_err_t rt_pic_uncascade(struct rt_pic_irq *pirq)
{
    rt_err_t err = RT_EOK;

    if (pirq && pirq->parent)
    {
        struct rt_pic_irq *parent;

        /* Clear the link under the child lock, then unlink under the parent lock. */
        rt_spin_lock(&pirq->rw_lock);

        parent = pirq->parent;
        pirq->parent = RT_NULL;

        rt_spin_unlock(&pirq->rw_lock);

        if (parent && pirq->pic->ops->flags & RT_PIC_F_IRQ_ROUTING)
        {
            rt_spin_lock(&parent->rw_lock);
            rt_list_remove(&pirq->list);
            rt_spin_unlock(&parent->rw_lock);
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
/*
 * Attach an ISR to logical IRQ 'irq' (shared-IRQ capable).
 * The first handler lives inline in pirq->isr; subsequent handlers are
 * heap-allocated and chained on pirq->isr.list. 'uid' identifies the
 * handler for rt_pic_detach_irq().
 * Returns RT_EOK, -RT_ERROR on allocation failure, -RT_EINVAL otherwise.
 */
rt_err_t rt_pic_attach_irq(int irq, rt_isr_handler_t handler, void *uid, const char *name, int flags)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (handler && name && (pirq = irq2pirq(irq)))
    {
        struct rt_pic_isr *isr = RT_NULL;
        rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

        err = RT_EOK;

        if (!pirq->isr.action.handler)
        {
            /* first attach */
            isr = &pirq->isr;
            rt_list_init(&isr->list);
        }
        else
        {
            /* Shared IRQ: drop the lock while allocating (malloc may sleep). */
            rt_spin_unlock_irqrestore(&pirq->rw_lock, level);

            if ((isr = rt_malloc(sizeof(*isr))))
            {
                rt_list_init(&isr->list);

                /* Re-acquire before touching the list; unlocked in the !err branch. */
                level = rt_spin_lock_irqsave(&pirq->rw_lock);
                rt_list_insert_after(&pirq->isr.list, &isr->list);
            }
            else
            {
                /* Lock already released on this path; no unlock below. */
                LOG_E("No memory to save '%s' isr", name);
                err = -RT_ERROR;
            }
        }

        if (!err)
        {
            isr->flags = flags;
            isr->action.handler = handler;
            isr->action.param = uid;
#ifdef RT_USING_INTERRUPT_INFO
            isr->action.counter = 0;
            rt_strncpy(isr->action.name, name, RT_NAME_MAX - 1);
            isr->action.name[RT_NAME_MAX - 1] = '\0';
#ifdef RT_USING_SMP
            rt_memset(isr->action.cpu_counter, 0, sizeof(isr->action.cpu_counter));
#endif
#endif
            rt_spin_unlock_irqrestore(&pirq->rw_lock, level);
        }
    }

    return err;
}
/*
 * Detach the ISR identified by 'uid' from logical IRQ 'irq'.
 * If the inline head slot (pirq->isr) matches and other handlers remain,
 * the first chained handler is promoted into the head slot and its heap
 * node freed; otherwise the matching chained node is unlinked and freed.
 * Returns RT_EOK if a matching handler was found, -RT_EINVAL otherwise.
 */
rt_err_t rt_pic_detach_irq(int irq, void *uid)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_bool_t will_free = RT_FALSE;
        struct rt_pic_isr *isr = RT_NULL;
        rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

        isr = &pirq->isr;

        if (isr->action.param == uid)
        {
            /* Removing the inline head handler. */
            if (rt_list_isempty(&isr->list))
            {
                isr->action.handler = RT_NULL;
                isr->action.param = RT_NULL;
            }
            else
            {
                /* Promote the first chained handler into the head slot. */
                struct rt_pic_isr *next_isr = rt_list_first_entry(&isr->list, struct rt_pic_isr, list);

                rt_list_remove(&next_isr->list);

                isr->action.handler = next_isr->action.handler;
                isr->action.param = next_isr->action.param;
#ifdef RT_USING_INTERRUPT_INFO
                isr->action.counter = next_isr->action.counter;
                /* NOTE(review): copies RT_NAME_MAX bytes; relies on the source
                 * name having been terminated at attach time. */
                rt_strncpy(isr->action.name, next_isr->action.name, RT_NAME_MAX);
#ifdef RT_USING_SMP
                rt_memcpy(isr->action.cpu_counter, next_isr->action.cpu_counter, sizeof(next_isr->action.cpu_counter));
#endif
#endif
                /* Free the now-empty heap node after dropping the lock. */
                isr = next_isr;
                will_free = RT_TRUE;
            }

            err = RT_EOK;
        }
        else
        {
            /* Search the chained handlers for a matching uid. */
            rt_list_for_each_entry(isr, &pirq->isr.list, list)
            {
                if (isr->action.param == uid)
                {
                    err = RT_EOK;
                    will_free = RT_TRUE;

                    rt_list_remove(&isr->list);
                    break;
                }
            }
        }

        rt_spin_unlock_irqrestore(&pirq->rw_lock, level);

        if (will_free)
        {
            rt_free(isr);
        }
    }

    return err;
}
  393. rt_err_t rt_pic_add_traps(rt_bool_t (*handler)(void *), void *data)
  394. {
  395. rt_err_t err = -RT_EINVAL;
  396. if (handler)
  397. {
  398. struct irq_traps *traps = rt_malloc(sizeof(*traps));
  399. if (traps)
  400. {
  401. rt_ubase_t level = rt_hw_interrupt_disable();
  402. rt_list_init(&traps->list);
  403. traps->data = data;
  404. traps->handler = handler;
  405. rt_list_insert_before(&_traps_nodes, &traps->list);
  406. err = RT_EOK;
  407. rt_hw_interrupt_enable(level);
  408. }
  409. else
  410. {
  411. LOG_E("No memory to save '%p' handler", handler);
  412. err = -RT_ENOMEM;
  413. }
  414. }
  415. return err;
  416. }
  417. rt_err_t rt_pic_do_traps(void)
  418. {
  419. rt_err_t err = -RT_ERROR;
  420. struct irq_traps *traps;
  421. rt_interrupt_enter();
  422. rt_list_for_each_entry(traps, &_traps_nodes, list)
  423. {
  424. if (traps->handler(traps->data))
  425. {
  426. err = RT_EOK;
  427. break;
  428. }
  429. }
  430. rt_interrupt_leave();
  431. return err;
  432. }
/*
 * Dispatch an interrupt: recursively handle any cascaded children (with
 * per-child ack/eoi), then run the pirq's own handler chain.
 * When RT_USING_PIC_STATISTICS is enabled, min/max/sum service times are
 * accumulated on the descriptor.
 * Returns RT_EOK if at least one handler ran, -RT_EEMPTY otherwise.
 */
rt_err_t rt_pic_handle_isr(struct rt_pic_irq *pirq)
{
    rt_err_t err = -RT_EEMPTY;
    rt_list_t *handler_nodes;
    struct rt_irq_desc *action;
#ifdef RT_USING_PIC_STATISTICS
    struct timespec ts;
    rt_ubase_t irq_time_ns;
    rt_ubase_t current_irq_begin;
#endif

    RT_ASSERT(pirq != RT_NULL);
    RT_ASSERT(pirq->pic != RT_NULL);

#ifdef RT_USING_PIC_STATISTICS
    /* Timestamp before any dispatch work. */
    rt_ktime_boottime_get_ns(&ts);
    current_irq_begin = ts.tv_sec * (1000UL * 1000 * 1000) + ts.tv_nsec;
#endif

    handler_nodes = &pirq->isr.list;
    action = &pirq->isr.action;

    if (!rt_list_isempty(&pirq->children_nodes))
    {
        struct rt_pic_irq *child;

        /* Routed children are serviced first, each bracketed by ack/eoi. */
        rt_list_for_each_entry(child, &pirq->children_nodes, list)
        {
            if (child->pic->ops->irq_ack)
            {
                child->pic->ops->irq_ack(child);
            }

            err = rt_pic_handle_isr(child);

            if (child->pic->ops->irq_eoi)
            {
                child->pic->ops->irq_eoi(child);
            }
        }
    }

    if (action->handler)
    {
        /* Head handler first, then any chained shared-IRQ handlers. */
        action->handler(pirq->irq, action->param);
#ifdef RT_USING_INTERRUPT_INFO
        action->counter++;
#ifdef RT_USING_SMP
        action->cpu_counter[rt_hw_cpu_id()]++;
#endif
#endif

        if (!rt_list_isempty(handler_nodes))
        {
            struct rt_pic_isr *isr;

            rt_list_for_each_entry(isr, handler_nodes, list)
            {
                action = &isr->action;
                RT_ASSERT(action->handler != RT_NULL);

                action->handler(pirq->irq, action->param);
#ifdef RT_USING_INTERRUPT_INFO
                action->counter++;
#ifdef RT_USING_SMP
                action->cpu_counter[rt_hw_cpu_id()]++;
#endif
#endif
            }
        }

        err = RT_EOK;
    }

#ifdef RT_USING_PIC_STATISTICS
    /* Fold this service time into the per-IRQ min/max/sum statistics. */
    rt_ktime_boottime_get_ns(&ts);
    irq_time_ns = ts.tv_sec * (1000UL * 1000 * 1000) + ts.tv_nsec - current_irq_begin;
    pirq->stat.sum_irq_time_ns += irq_time_ns;
    if (irq_time_ns < pirq->stat.min_irq_time_ns || pirq->stat.min_irq_time_ns == 0)
    {
        pirq->stat.min_irq_time_ns = irq_time_ns;
    }
    if (irq_time_ns > pirq->stat.max_irq_time_ns)
    {
        pirq->stat.max_irq_time_ns = irq_time_ns;
    }
#endif

    return err;
}
  509. rt_weak rt_err_t rt_pic_user_extends(struct rt_pic *pic)
  510. {
  511. return -RT_ENOSYS;
  512. }
  513. rt_err_t rt_pic_irq_init(void)
  514. {
  515. rt_err_t err = RT_EOK;
  516. struct rt_pic *pic;
  517. rt_list_for_each_entry(pic, &_pic_nodes, list)
  518. {
  519. if (pic->ops->irq_init)
  520. {
  521. err = pic->ops->irq_init(pic);
  522. if (err)
  523. {
  524. LOG_E("PIC = %s init fail", pic->ops->name);
  525. break;
  526. }
  527. }
  528. }
  529. return err;
  530. }
  531. rt_err_t rt_pic_irq_finit(void)
  532. {
  533. rt_err_t err = RT_EOK;
  534. struct rt_pic *pic;
  535. rt_list_for_each_entry(pic, &_pic_nodes, list)
  536. {
  537. if (pic->ops->irq_finit)
  538. {
  539. err = pic->ops->irq_finit(pic);
  540. if (err)
  541. {
  542. LOG_E("PIC = %s finit fail", pic->ops->name);
  543. break;
  544. }
  545. }
  546. }
  547. return err;
  548. }
  549. void rt_pic_irq_enable(int irq)
  550. {
  551. struct rt_pic_irq *pirq = irq2pirq(irq);
  552. RT_ASSERT(pirq != RT_NULL);
  553. rt_hw_spin_lock(&pirq->rw_lock.lock);
  554. if (pirq->pic->ops->irq_enable)
  555. {
  556. pirq->pic->ops->irq_enable(pirq);
  557. }
  558. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  559. }
  560. void rt_pic_irq_disable(int irq)
  561. {
  562. struct rt_pic_irq *pirq = irq2pirq(irq);
  563. RT_ASSERT(pirq != RT_NULL);
  564. rt_hw_spin_lock(&pirq->rw_lock.lock);
  565. if (pirq->pic->ops->irq_disable)
  566. {
  567. pirq->pic->ops->irq_disable(pirq);
  568. }
  569. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  570. }
  571. void rt_pic_irq_ack(int irq)
  572. {
  573. struct rt_pic_irq *pirq = irq2pirq(irq);
  574. RT_ASSERT(pirq != RT_NULL);
  575. rt_hw_spin_lock(&pirq->rw_lock.lock);
  576. if (pirq->pic->ops->irq_ack)
  577. {
  578. pirq->pic->ops->irq_ack(pirq);
  579. }
  580. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  581. }
  582. void rt_pic_irq_mask(int irq)
  583. {
  584. struct rt_pic_irq *pirq = irq2pirq(irq);
  585. RT_ASSERT(pirq != RT_NULL);
  586. rt_hw_spin_lock(&pirq->rw_lock.lock);
  587. if (pirq->pic->ops->irq_mask)
  588. {
  589. pirq->pic->ops->irq_mask(pirq);
  590. }
  591. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  592. }
  593. void rt_pic_irq_unmask(int irq)
  594. {
  595. struct rt_pic_irq *pirq = irq2pirq(irq);
  596. RT_ASSERT(pirq != RT_NULL);
  597. rt_hw_spin_lock(&pirq->rw_lock.lock);
  598. if (pirq->pic->ops->irq_unmask)
  599. {
  600. pirq->pic->ops->irq_unmask(pirq);
  601. }
  602. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  603. }
  604. void rt_pic_irq_eoi(int irq)
  605. {
  606. struct rt_pic_irq *pirq = irq2pirq(irq);
  607. RT_ASSERT(pirq != RT_NULL);
  608. rt_hw_spin_lock(&pirq->rw_lock.lock);
  609. if (pirq->pic->ops->irq_eoi)
  610. {
  611. pirq->pic->ops->irq_eoi(pirq);
  612. }
  613. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  614. }
  615. rt_err_t rt_pic_irq_set_priority(int irq, rt_uint32_t priority)
  616. {
  617. rt_err_t err = -RT_EINVAL;
  618. struct rt_pic_irq *pirq = irq2pirq(irq);
  619. if (pirq)
  620. {
  621. rt_hw_spin_lock(&pirq->rw_lock.lock);
  622. if (pirq->pic->ops->irq_set_priority)
  623. {
  624. err = pirq->pic->ops->irq_set_priority(pirq, priority);
  625. if (!err)
  626. {
  627. pirq->priority = priority;
  628. }
  629. }
  630. else
  631. {
  632. err = -RT_ENOSYS;
  633. }
  634. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  635. }
  636. return err;
  637. }
  638. rt_uint32_t rt_pic_irq_get_priority(int irq)
  639. {
  640. rt_uint32_t priority = RT_UINT32_MAX;
  641. struct rt_pic_irq *pirq = irq2pirq(irq);
  642. if (pirq)
  643. {
  644. rt_hw_spin_lock(&pirq->rw_lock.lock);
  645. priority = pirq->priority;
  646. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  647. }
  648. return priority;
  649. }
  650. rt_err_t rt_pic_irq_set_affinity(int irq, rt_bitmap_t *affinity)
  651. {
  652. rt_err_t err = -RT_EINVAL;
  653. struct rt_pic_irq *pirq;
  654. if (affinity && (pirq = irq2pirq(irq)))
  655. {
  656. rt_hw_spin_lock(&pirq->rw_lock.lock);
  657. if (pirq->pic->ops->irq_set_affinity)
  658. {
  659. err = pirq->pic->ops->irq_set_affinity(pirq, affinity);
  660. if (!err)
  661. {
  662. rt_memcpy(pirq->affinity, affinity, sizeof(pirq->affinity));
  663. }
  664. }
  665. else
  666. {
  667. err = -RT_ENOSYS;
  668. }
  669. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  670. }
  671. return err;
  672. }
  673. rt_err_t rt_pic_irq_get_affinity(int irq, rt_bitmap_t *out_affinity)
  674. {
  675. rt_err_t err = -RT_EINVAL;
  676. struct rt_pic_irq *pirq;
  677. if (out_affinity && (pirq = irq2pirq(irq)))
  678. {
  679. rt_hw_spin_lock(&pirq->rw_lock.lock);
  680. rt_memcpy(out_affinity, pirq->affinity, sizeof(pirq->affinity));
  681. err = RT_EOK;
  682. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  683. }
  684. return err;
  685. }
  686. rt_err_t rt_pic_irq_set_triger_mode(int irq, rt_uint32_t mode)
  687. {
  688. rt_err_t err = -RT_EINVAL;
  689. struct rt_pic_irq *pirq;
  690. if ((~mode & RT_IRQ_MODE_MASK) && (pirq = irq2pirq(irq)))
  691. {
  692. rt_hw_spin_lock(&pirq->rw_lock.lock);
  693. if (pirq->pic->ops->irq_set_triger_mode)
  694. {
  695. err = pirq->pic->ops->irq_set_triger_mode(pirq, mode);
  696. if (!err)
  697. {
  698. pirq->mode = mode;
  699. }
  700. }
  701. else
  702. {
  703. err = -RT_ENOSYS;
  704. }
  705. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  706. }
  707. return err;
  708. }
  709. rt_uint32_t rt_pic_irq_get_triger_mode(int irq)
  710. {
  711. rt_uint32_t mode = RT_UINT32_MAX;
  712. struct rt_pic_irq *pirq = irq2pirq(irq);
  713. if (pirq)
  714. {
  715. rt_hw_spin_lock(&pirq->rw_lock.lock);
  716. mode = pirq->mode;
  717. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  718. }
  719. return mode;
  720. }
  721. void rt_pic_irq_send_ipi(int irq, rt_bitmap_t *cpumask)
  722. {
  723. struct rt_pic_irq *pirq;
  724. if (cpumask && (pirq = irq2pirq(irq)))
  725. {
  726. rt_hw_spin_lock(&pirq->rw_lock.lock);
  727. if (pirq->pic->ops->irq_send_ipi)
  728. {
  729. pirq->pic->ops->irq_send_ipi(pirq, cpumask);
  730. }
  731. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  732. }
  733. }
  734. rt_err_t rt_pic_irq_set_state_raw(struct rt_pic *pic, int hwirq, int type, rt_bool_t state)
  735. {
  736. rt_err_t err;
  737. if (pic && hwirq >= 0)
  738. {
  739. if (pic->ops->irq_set_state)
  740. {
  741. err = pic->ops->irq_set_state(pic, hwirq, type, state);
  742. }
  743. else
  744. {
  745. err = -RT_ENOSYS;
  746. }
  747. }
  748. else
  749. {
  750. err = -RT_EINVAL;
  751. }
  752. return err;
  753. }
  754. rt_err_t rt_pic_irq_get_state_raw(struct rt_pic *pic, int hwirq, int type, rt_bool_t *out_state)
  755. {
  756. rt_err_t err;
  757. if (pic && hwirq >= 0)
  758. {
  759. if (pic->ops->irq_get_state)
  760. {
  761. rt_bool_t state;
  762. if (!(err = pic->ops->irq_get_state(pic, hwirq, type, &state)) && out_state)
  763. {
  764. *out_state = state;
  765. }
  766. }
  767. else
  768. {
  769. err = -RT_ENOSYS;
  770. }
  771. }
  772. else
  773. {
  774. err = -RT_EINVAL;
  775. }
  776. return err;
  777. }
  778. rt_err_t rt_pic_irq_set_state(int irq, int type, rt_bool_t state)
  779. {
  780. rt_err_t err;
  781. struct rt_pic_irq *pirq = irq2pirq(irq);
  782. RT_ASSERT(pirq != RT_NULL);
  783. rt_hw_spin_lock(&pirq->rw_lock.lock);
  784. err = rt_pic_irq_set_state_raw(pirq->pic, pirq->hwirq, type, state);
  785. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  786. return err;
  787. }
  788. rt_err_t rt_pic_irq_get_state(int irq, int type, rt_bool_t *out_state)
  789. {
  790. rt_err_t err;
  791. struct rt_pic_irq *pirq = irq2pirq(irq);
  792. RT_ASSERT(pirq != RT_NULL);
  793. rt_hw_spin_lock(&pirq->rw_lock.lock);
  794. err = rt_pic_irq_get_state_raw(pirq->pic, pirq->hwirq, type, out_state);
  795. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  796. return err;
  797. }
  798. void rt_pic_irq_parent_enable(struct rt_pic_irq *pirq)
  799. {
  800. RT_ASSERT(pirq != RT_NULL);
  801. pirq = pirq->parent;
  802. if (pirq->pic->ops->irq_enable)
  803. {
  804. pirq->pic->ops->irq_enable(pirq);
  805. }
  806. }
  807. void rt_pic_irq_parent_disable(struct rt_pic_irq *pirq)
  808. {
  809. RT_ASSERT(pirq != RT_NULL);
  810. pirq = pirq->parent;
  811. if (pirq->pic->ops->irq_disable)
  812. {
  813. pirq->pic->ops->irq_disable(pirq);
  814. }
  815. }
  816. void rt_pic_irq_parent_ack(struct rt_pic_irq *pirq)
  817. {
  818. RT_ASSERT(pirq != RT_NULL);
  819. pirq = pirq->parent;
  820. if (pirq->pic->ops->irq_ack)
  821. {
  822. pirq->pic->ops->irq_ack(pirq);
  823. }
  824. }
  825. void rt_pic_irq_parent_mask(struct rt_pic_irq *pirq)
  826. {
  827. RT_ASSERT(pirq != RT_NULL);
  828. pirq = pirq->parent;
  829. if (pirq->pic->ops->irq_mask)
  830. {
  831. pirq->pic->ops->irq_mask(pirq);
  832. }
  833. }
  834. void rt_pic_irq_parent_unmask(struct rt_pic_irq *pirq)
  835. {
  836. RT_ASSERT(pirq != RT_NULL);
  837. pirq = pirq->parent;
  838. if (pirq->pic->ops->irq_unmask)
  839. {
  840. pirq->pic->ops->irq_unmask(pirq);
  841. }
  842. }
  843. void rt_pic_irq_parent_eoi(struct rt_pic_irq *pirq)
  844. {
  845. RT_ASSERT(pirq != RT_NULL);
  846. pirq = pirq->parent;
  847. if (pirq->pic->ops->irq_eoi)
  848. {
  849. pirq->pic->ops->irq_eoi(pirq);
  850. }
  851. }
  852. rt_err_t rt_pic_irq_parent_set_priority(struct rt_pic_irq *pirq, rt_uint32_t priority)
  853. {
  854. rt_err_t err = -RT_ENOSYS;
  855. RT_ASSERT(pirq != RT_NULL);
  856. pirq = pirq->parent;
  857. if (pirq->pic->ops->irq_set_priority)
  858. {
  859. if (!(err = pirq->pic->ops->irq_set_priority(pirq, priority)))
  860. {
  861. pirq->priority = priority;
  862. }
  863. }
  864. return err;
  865. }
  866. rt_err_t rt_pic_irq_parent_set_affinity(struct rt_pic_irq *pirq, rt_bitmap_t *affinity)
  867. {
  868. rt_err_t err = -RT_ENOSYS;
  869. RT_ASSERT(pirq != RT_NULL);
  870. pirq = pirq->parent;
  871. if (pirq->pic->ops->irq_set_affinity)
  872. {
  873. if (!(err = pirq->pic->ops->irq_set_affinity(pirq, affinity)))
  874. {
  875. rt_memcpy(pirq->affinity, affinity, sizeof(pirq->affinity));
  876. }
  877. }
  878. return err;
  879. }
  880. rt_err_t rt_pic_irq_parent_set_triger_mode(struct rt_pic_irq *pirq, rt_uint32_t mode)
  881. {
  882. rt_err_t err = -RT_ENOSYS;
  883. RT_ASSERT(pirq != RT_NULL);
  884. pirq = pirq->parent;
  885. if (pirq->pic->ops->irq_set_triger_mode)
  886. {
  887. if (!(err = pirq->pic->ops->irq_set_triger_mode(pirq, mode)))
  888. {
  889. pirq->mode = mode;
  890. }
  891. }
  892. return err;
  893. }
#ifdef RT_USING_OFW
/* Linker-section markers bracketing the PIC OFW driver stubs. */
RT_OFW_STUB_RANGE_EXPORT(pic, _pic_ofw_start, _pic_ofw_end);

/*
 * Probe every device-tree node carrying the "interrupt-controller" property
 * against the registered PIC stubs.
 */
static rt_err_t ofw_pic_init(void)
{
    struct rt_ofw_node *ic_np;

    rt_ofw_foreach_node_by_prop(ic_np, "interrupt-controller")
    {
        rt_ofw_stub_probe_range(ic_np, &_pic_ofw_start, &_pic_ofw_end);
    }

    return RT_EOK;
}
#else
/* No OFW support: nothing to probe. */
static rt_err_t ofw_pic_init(void)
{
    return RT_EOK;
}
#endif /* !RT_USING_OFW */
  911. rt_err_t rt_pic_init(void)
  912. {
  913. rt_err_t err;
  914. LOG_D("init start");
  915. err = ofw_pic_init();
  916. LOG_D("init end");
  917. return err;
  918. }
#if defined(RT_USING_CONSOLE) && defined(RT_USING_MSH)
/*
 * MSH command: dump the IRQ table.
 * By default only IRQs with an attached handler are shown; "list_irq all"
 * also shows configured-but-unused lines. Columns vary with
 * RT_USING_SMP / RT_USING_INTERRUPT_INFO / RT_USING_PIC_STATISTICS.
 */
static int list_irq(int argc, char**argv)
{
    rt_size_t irq_nr = 0;
    rt_bool_t dump_all = RT_FALSE;
    const char *const irq_modes[] =
    {
        [RT_IRQ_MODE_NONE] = "None",
        [RT_IRQ_MODE_EDGE_RISING] = "Edge-Rising",
        [RT_IRQ_MODE_EDGE_FALLING] = "Edge-Falling",
        [RT_IRQ_MODE_EDGE_BOTH] = "Edge-Both",
        [RT_IRQ_MODE_LEVEL_HIGH] = "Level-High",
        [RT_IRQ_MODE_LEVEL_LOW] = "Level-Low",
    };
    /* Static scratch buffers: msh commands are not re-entrant. */
    static char info[RT_CONSOLEBUF_SIZE];
#ifdef RT_USING_SMP
    static char cpumask[RT_CPUS_NR + 1] = { [RT_CPUS_NR] = '\0' };
#endif

    if (argc > 1)
    {
        if (!rt_strcmp(argv[1], "all"))
        {
            dump_all = RT_TRUE;
        }
    }

    /* Header row; widths match the per-row format below. */
    rt_kprintf("%-*.s %-*.s %s %-*.s %-*.s %-*.s %-*.sUsers%-*.s",
            6, "IRQ",
            6, "HW-IRQ",
            "MSI",
            _pic_name_max, "PIC",
            12, "Mode",
#ifdef RT_USING_SMP
            RT_CPUS_NR, "CPUs",
#else
            0, 0,
#endif
#ifdef RT_USING_INTERRUPT_INFO
            11, "Count",
            5, ""
#else
            0, 0,
            10, "-Number"
#endif
            );

#if defined(RT_USING_SMP) && defined(RT_USING_INTERRUPT_INFO)
    for (int i = 0; i < RT_CPUS_NR; i++)
    {
        rt_kprintf(" cpu%2d ", i);
    }
#endif
#ifdef RT_USING_PIC_STATISTICS
    rt_kprintf(" max/ns avg/ns min/ns");
#endif
    rt_kputs("\n");

    for (int i = 0; i < RT_ARRAY_SIZE(_pirq_hash); ++i)
    {
        struct rt_pic_irq *pirq = &_pirq_hash[i];

        /* Skip unconfigured slots, and unused ones unless "all" was given. */
        if (!pirq->pic || !(dump_all || pirq->isr.action.handler))
        {
            continue;
        }

        rt_snprintf(info, sizeof(info), "%-6d %-6d %c %-*.s %-*.s ",
                pirq->irq,
                pirq->hwirq,
                pirq->msi_desc ? 'Y' : 'N',
                _pic_name_max, pirq->pic->ops->name,
                12, irq_modes[pirq->mode]);

#ifdef RT_USING_SMP
        /* Render the affinity bitmap as a '0'/'1' string, MSB-first.
         * NOTE(review): the inner bound uses RT_BITMAP_BIT_LEN(1) — looks
         * like "bits per bitmap word"; confirm against the bitmap API. */
        for (int group = 0, id = 0; group < RT_ARRAY_SIZE(pirq->affinity); ++group)
        {
            rt_bitmap_t mask = pirq->affinity[group];

            for (int idx = 0; id < RT_CPUS_NR && idx < RT_BITMAP_BIT_LEN(1); ++idx, ++id)
            {
                cpumask[RT_ARRAY_SIZE(cpumask) - id - 2] = '0' + ((mask >> idx) & 1);
            }
        }
#endif /* RT_USING_SMP */

        rt_kputs(info);
#ifdef RT_USING_SMP
        rt_kputs(cpumask);
#endif

#ifdef RT_USING_INTERRUPT_INFO
        rt_kprintf(" %-10d ", pirq->isr.action.counter);
        rt_kprintf("%-*.s", 10, pirq->isr.action.name);
#ifdef RT_USING_SMP
        for (int cpuid = 0; cpuid < RT_CPUS_NR; cpuid++)
        {
            rt_kprintf(" %-10d", pirq->isr.action.cpu_counter[cpuid]);
        }
#endif
#ifdef RT_USING_PIC_STATISTICS
        /* NOTE(review): divides by action.counter; appears to assume the
         * counter is non-zero when statistics exist — confirm. */
        rt_kprintf(" %-10d %-10d %-10d", pirq->stat.max_irq_time_ns, pirq->stat.sum_irq_time_ns/pirq->isr.action.counter, pirq->stat.min_irq_time_ns);
#endif
        rt_kputs("\n");

        /* Extra rows for chained (shared-IRQ) handlers. */
        if (!rt_list_isempty(&pirq->isr.list))
        {
            struct rt_pic_isr *repeat_isr;

            rt_list_for_each_entry(repeat_isr, &pirq->isr.list, list)
            {
                rt_kputs(info);
#ifdef RT_USING_SMP
                rt_kputs(cpumask);
#endif
                rt_kprintf("%-10d ", repeat_isr->action.counter);
                rt_kprintf("%-*.s", 10, repeat_isr->action.name);
#ifdef RT_USING_SMP
                for (int cpuid = 0; cpuid < RT_CPUS_NR; cpuid++)
                {
                    rt_kprintf(" %-10d", repeat_isr->action.cpu_counter[cpuid]);
                }
#endif
#ifdef RT_USING_PIC_STATISTICS
                rt_kprintf(" --- --- ---");
#endif
                rt_kputs("\n");
            }
        }
#else
        rt_kprintf(" %d\n", rt_list_len(&pirq->isr.list));
#endif
        ++irq_nr;
    }

    rt_kprintf("%d IRQs found\n", irq_nr);

    return 0;
}
MSH_CMD_EXPORT(list_irq, dump using or args = all of irq information);
#endif /* RT_USING_CONSOLE && RT_USING_MSH */