/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-07     GuEe-GUI     first version
 */

#include <drivers/pci_msi.h>
#include <drivers/core/numa.h>

#define DBG_TAG "pci.msi"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

/* MSI-X supports at most 2048 interrupt vectors per PCI function */
static RT_IRQ_AFFINITY_DECLARE(msi_affinity_default[2048]) rt_section(".bss.noclean.pci.msi");

rt_inline void spin_lock(struct rt_spinlock *lock)
{
    rt_hw_spin_lock(&lock->lock);
}

rt_inline void spin_unlock(struct rt_spinlock *lock)
{
    rt_hw_spin_unlock(&lock->lock);
}
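
/*
 * Each MSI-X table entry is 16 bytes (PCIM_MSIX_ENTRY_SIZE): message
 * address low/high, message data, and a vector control word whose bit 0
 * is the per-vector mask.
 */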

rt_inline void *msix_table_base(struct rt_pci_msix_conf *msix)
{
    return msix->table_base + msix->index * PCIM_MSIX_ENTRY_SIZE;
}

rt_inline void *msix_vector_ctrl_base(struct rt_pci_msix_conf *msix)
{
    return msix_table_base(msix) + PCIM_MSIX_ENTRY_VECTOR_CTRL;
}

rt_inline void msix_write_vector_ctrl(struct rt_pci_msix_conf *msix,
        rt_uint32_t ctrl)
{
    void *vc_addr = msix_vector_ctrl_base(msix);

    HWREG32(vc_addr) = ctrl;
}

rt_inline void msix_mask(struct rt_pci_msix_conf *msix)
{
    msix->msg_ctrl |= PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
    msix_write_vector_ctrl(msix, msix->msg_ctrl);

    /* Flush the write to the device */
    HWREG32(msix->table_base);
}

static void msix_update_ctrl(struct rt_pci_device *pdev,
        rt_uint16_t clear, rt_uint16_t set)
{
    rt_uint16_t msgctl;

    rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &msgctl);
    msgctl &= ~clear;
    msgctl |= set;
    rt_pci_write_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, msgctl);
}

rt_inline void msix_unmask(struct rt_pci_msix_conf *msix)
{
    msix->msg_ctrl &= ~PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
    msix_write_vector_ctrl(msix, msix->msg_ctrl);
}
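
/*
 * multi_msg_max holds log2 of the number of vectors the function supports
 * (the MMC field), so the mask covering all of them is
 * (1 << (1 << multi_msg_max)) - 1. From 32 vectors (log2 >= 5) on, that
 * is all 32 bits of the mask register.
 */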

rt_inline rt_uint32_t msi_multi_mask(struct rt_pci_msi_conf *msi)
{
    if (msi->cap.multi_msg_max >= 5)
    {
        return 0xffffffff;
    }

    return (1 << (1 << msi->cap.multi_msg_max)) - 1;
}

static void msi_write_mask(struct rt_pci_msi_conf *msi,
        rt_uint32_t clear, rt_uint32_t set, struct rt_pci_device *pdev)
{
    if (msi->cap.is_masking)
    {
        rt_ubase_t level = rt_spin_lock_irqsave(&pdev->msi_lock);

        msi->mask &= ~clear;
        msi->mask |= set;
        rt_pci_write_config_u32(pdev, msi->mask_pos, msi->mask);

        rt_spin_unlock_irqrestore(&pdev->msi_lock, level);
    }
}

rt_inline void msi_mask(struct rt_pci_msi_conf *msi,
        rt_uint32_t mask, struct rt_pci_device *pdev)
{
    msi_write_mask(msi, 0, mask, pdev);
}

rt_inline void msi_unmask(struct rt_pci_msi_conf *msi,
        rt_uint32_t mask, struct rt_pci_device *pdev)
{
    msi_write_mask(msi, mask, 0, pdev);
}

static void msi_write_enable(struct rt_pci_device *pdev, rt_bool_t enable)
{
    rt_uint16_t msgctl;

    rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &msgctl);

    msgctl &= ~PCIM_MSICTRL_MSI_ENABLE;

    if (enable)
    {
        msgctl |= PCIM_MSICTRL_MSI_ENABLE;
    }

    rt_pci_write_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, msgctl);
}
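
/*
 * Bind one MSI/MSI-X vector to a CPU set. When the caller passed the
 * msi_affinity_default placeholder instead of a real mask, the affinity
 * is derived from the NUMA node that owns the message (doorbell) address.
 */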

static void msi_affinity_init(struct rt_pci_msi_desc *desc, int msi_index,
        rt_bitmap_t *cpumasks)
{
    int irq;
    struct rt_pic_irq *pirq;
    struct rt_pci_device *pdev = desc->pdev;
    struct rt_pic *msi_pic = pdev->msi_pic;

    /* MSI-X descriptors carry one vector each; MSI vectors are consecutive */
    irq = desc->irq + (desc->is_msix ? 0 : msi_index);
    pirq = rt_pic_find_pirq(msi_pic, irq);

    /* Save affinity */
    if (desc->is_msix)
    {
        desc->affinity = pirq->affinity;
    }
    else
    {
        desc->affinities[msi_index] = pirq->affinity;
    }

    if ((void *)cpumasks >= (void *)msi_affinity_default &&
        (void *)cpumasks < (void *)msi_affinity_default + sizeof(msi_affinity_default))
    {
        rt_uint64_t data_address;

        /* Get the MSI/MSI-X write data address */
        data_address = desc->msg.address_hi;
        data_address <<= 32;
        data_address |= desc->msg.address_lo;

        /* Prepare affinity */
        cpumasks = pirq->affinity;
        rt_numa_memory_affinity(data_address, cpumasks);
    }
    else if (rt_bitmap_next_set_bit(cpumasks, 0, RT_CPUS_NR) >= RT_CPUS_NR)
    {
        /* No affinity info found, give up */
        return;
    }

    if (!rt_pic_irq_set_affinity(irq, cpumasks))
    {
        if (msi_pic->ops->irq_write_msi_msg)
        {
            msi_pic->ops->irq_write_msi_msg(pirq, &desc->msg);
        }
    }
}
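
/*
 * Disable MSI on the function, re-enable legacy INTx pin assertion, and
 * restore the default wired IRQ number saved at setup time.
 */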

void rt_pci_msi_shutdown(struct rt_pci_device *pdev)
{
    struct rt_pci_msi_desc *desc;

    if (!pdev)
    {
        return;
    }

    msi_write_enable(pdev, RT_FALSE);
    rt_pci_intx(pdev, RT_TRUE);

    if ((desc = rt_pci_msi_first_desc(pdev)))
    {
        msi_unmask(&desc->msi, msi_multi_mask(&desc->msi), pdev);

        /* Restore pdev->irq to its default pin-assertion IRQ */
        pdev->irq = desc->msi.default_irq;
    }

    pdev->msi_enabled = RT_FALSE;
}

void rt_pci_msix_shutdown(struct rt_pci_device *pdev)
{
    struct rt_pci_msi_desc *desc;

    if (!pdev)
    {
        return;
    }

    rt_pci_msi_for_each_desc(pdev, desc)
    {
        msix_mask(&desc->msix);
    }

    msix_update_ctrl(pdev, PCIM_MSIXCTRL_MSIX_ENABLE, 0);
    rt_pci_intx(pdev, RT_TRUE);

    pdev->msix_enabled = RT_FALSE;
}

void rt_pci_msi_free_irqs(struct rt_pci_device *pdev)
{
    struct rt_pci_msi_desc *desc, *last_desc = RT_NULL;

    if (!pdev)
    {
        return;
    }

    if (pdev->msix_base)
    {
        rt_iounmap(pdev->msix_base);
        pdev->msix_base = RT_NULL;
    }

    rt_pci_msi_cleanup_irqs(pdev);

    rt_pci_msi_for_each_desc(pdev, desc)
    {
        /* Free one node behind the cursor so the iteration stays safe */
        if (last_desc)
        {
            rt_list_remove(&last_desc->list);
            rt_free(last_desc);
        }

        last_desc = desc;
    }

    /* The last one */
    if (last_desc)
    {
        rt_list_remove(&last_desc->list);
        rt_free(last_desc);
    }
}
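
/*
 * Program a composed message into the hardware: MSI-X messages live in
 * the memory-mapped vector table, MSI messages in the capability
 * registers in config space. The MSI-X entry is kept masked while its
 * address/data pair is rewritten, so the device never sees a
 * half-written message.
 */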

void rt_pci_msi_write_msg(struct rt_pci_msi_desc *desc, struct rt_pci_msi_msg *msg)
{
    struct rt_pci_device *pdev = desc->pdev;

    if (desc->is_msix)
    {
        void *msix_entry;
        rt_bool_t unmasked;
        rt_uint32_t msgctl;
        struct rt_pci_msix_conf *msix = &desc->msix;

        msgctl = msix->msg_ctrl;
        unmasked = !(msgctl & PCIM_MSIX_ENTRYVECTOR_CTRL_MASK);
        msix_entry = msix_table_base(msix);

        if (unmasked)
        {
            msix_write_vector_ctrl(msix, msgctl | PCIM_MSIX_ENTRYVECTOR_CTRL_MASK);
        }

        HWREG32(msix_entry + PCIM_MSIX_ENTRY_LOWER_ADDR) = msg->address_lo;
        HWREG32(msix_entry + PCIM_MSIX_ENTRY_UPPER_ADDR) = msg->address_hi;
        HWREG32(msix_entry + PCIM_MSIX_ENTRY_DATA) = msg->data;

        if (unmasked)
        {
            msix_write_vector_ctrl(msix, msgctl);
        }

        /* Ensure that the writes are visible in the device */
        HWREG32(msix_entry + PCIM_MSIX_ENTRY_DATA);
    }
    else
    {
        rt_uint16_t msgctl;
        int pos = pdev->msi_cap;
        struct rt_pci_msi_conf *msi = &desc->msi;

        rt_pci_read_config_u16(pdev, pos + PCIR_MSI_CTRL, &msgctl);
        msgctl &= ~PCIM_MSICTRL_MME_MASK;
        msgctl |= msi->cap.multi_msg_use << PCIM_MSICTRL_MME_SHIFT;
        rt_pci_write_config_u16(pdev, pos + PCIR_MSI_CTRL, msgctl);

        rt_pci_write_config_u32(pdev, pos + PCIR_MSI_ADDR, msg->address_lo);
        /*
         * The value stored in the Message Address field is processor
         * specific: system software initializes it when the PCIe device is
         * set up, and the encoding rules differ between processors. If the
         * Multiple Message Enable field is not 0b000 (multiple IRQs), the
         * PCIe device can raise different interrupt requests by modifying
         * the low bits of the Message Data field.
         */
        if (msi->cap.is_64bit)
        {
            rt_pci_write_config_u32(pdev, pos + PCIR_MSI_ADDR_HIGH, msg->address_hi);
            rt_pci_write_config_u16(pdev, pos + PCIR_MSI_DATA_64BIT, msg->data);
        }
        else
        {
            rt_pci_write_config_u16(pdev, pos + PCIR_MSI_DATA, msg->data);
        }

        /* Ensure that the writes are visible in the device */
        rt_pci_read_config_u16(pdev, pos + PCIR_MSI_CTRL, &msgctl);
    }

    desc->msg = *msg;

    if (desc->write_msi_msg)
    {
        desc->write_msi_msg(desc, desc->write_msi_msg_data);
    }
}

void rt_pci_msi_mask_irq(struct rt_pic_irq *pirq)
{
    struct rt_pci_msi_desc *desc;

    if (pirq && (desc = pirq->msi_desc))
    {
        if (desc->is_msix)
        {
            msix_mask(&desc->msix);
        }
        else
        {
            msi_mask(&desc->msi, RT_BIT(pirq->irq - desc->irq), desc->pdev);
        }
    }
}

void rt_pci_msi_unmask_irq(struct rt_pic_irq *pirq)
{
    struct rt_pci_msi_desc *desc;

    if (pirq && (desc = pirq->msi_desc))
    {
        if (desc->is_msix)
        {
            msix_unmask(&desc->msix);
        }
        else
        {
            msi_unmask(&desc->msi, RT_BIT(pirq->irq - desc->irq), desc->pdev);
        }
    }
}
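
/*
 * Allocate interrupt vectors for a device, preferring MSI-X, then MSI,
 * then the legacy INTx pin, as permitted by the flags. Returns the number
 * of vectors allocated, or a negative error code.
 *
 * A minimal usage sketch (hypothetical driver code, device setup elided):
 *
 *     rt_ssize_t nr = rt_pci_alloc_vector(pdev, 1, 8,
 *             RT_PCI_IRQ_F_MSIX | RT_PCI_IRQ_F_MSI | RT_PCI_IRQ_F_LEGACY,
 *             RT_NULL);
 *     if (nr < 0)
 *     {
 *         return nr;
 *     }
 */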

rt_ssize_t rt_pci_alloc_vector(struct rt_pci_device *pdev, int min, int max,
        rt_uint32_t flags, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
    rt_ssize_t res = -RT_ENOSYS;

    if (!pdev || min > max)
    {
        return -RT_EINVAL;
    }

    if (flags & RT_PCI_IRQ_F_AFFINITY)
    {
        if (!affinities)
        {
            affinities = msi_affinity_default;
        }
    }
    else
    {
        affinities = RT_NULL;
    }

    if (flags & RT_PCI_IRQ_F_MSIX)
    {
        res = rt_pci_msix_enable_range_affinity(pdev, RT_NULL, min, max, affinities);

        if (res > 0)
        {
            return res;
        }
    }

    if (flags & RT_PCI_IRQ_F_MSI)
    {
        res = rt_pci_msi_enable_range_affinity(pdev, min, max, affinities);

        if (res > 0)
        {
            return res;
        }
    }

    if (flags & RT_PCI_IRQ_F_LEGACY)
    {
        if (min == 1 && pdev->irq >= 0)
        {
            if (affinities)
            {
                int cpuid;
                RT_IRQ_AFFINITY_DECLARE(old_affinity);

                /* INTx may be shared, so merge into the existing affinity */
                rt_pic_irq_get_affinity(pdev->irq, old_affinity);

                rt_bitmap_for_each_set_bit(affinities[0], cpuid, RT_CPUS_NR)
                {
                    RT_IRQ_AFFINITY_SET(old_affinity, cpuid);
                }

                rt_pic_irq_set_affinity(pdev->irq, old_affinity);
            }

            rt_pci_intx(pdev, RT_TRUE);

            return min;
        }
    }

    return res;
}

void rt_pci_free_vector(struct rt_pci_device *pdev)
{
    if (!pdev)
    {
        return;
    }

    rt_pci_msi_disable(pdev);
    rt_pci_msix_disable(pdev);
    rt_pci_irq_mask(pdev);
}

static rt_err_t msi_verify_entries(struct rt_pci_device *pdev)
{
    if (pdev->no_64bit_msi)
    {
        struct rt_pci_msi_desc *desc;

        rt_pci_msi_for_each_desc(pdev, desc)
        {
            if (desc->msg.address_hi)
            {
                LOG_D("%s: Arch assigned 64-bit MSI address %08x%08x "
                        "but device only supports 32 bits",
                        rt_dm_dev_get_name(&pdev->parent),
                        desc->msg.address_hi, desc->msg.address_lo);

                return -RT_EIO;
            }
        }
    }

    return RT_EOK;
}
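
/*
 * Allocate a descriptor and link it onto the device. For plain MSI, the
 * per-vector affinity pointers are carved out of the same allocation,
 * directly after the descriptor; 32 slots cover the MSI maximum of 32
 * vectors per function.
 */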

static rt_err_t msi_insert_desc(struct rt_pci_device *pdev,
        struct rt_pci_msi_desc *init_desc)
{
    rt_size_t msi_affinity_ptr_size = 0;
    struct rt_pci_msi_desc *msi_desc;

    if (!init_desc->is_msix)
    {
        msi_affinity_ptr_size += sizeof(msi_desc->affinities[0]) * 32;
    }

    msi_desc = rt_calloc(1, sizeof(*msi_desc) + msi_affinity_ptr_size);

    if (!msi_desc)
    {
        return -RT_ENOMEM;
    }

    rt_memcpy(msi_desc, init_desc, sizeof(*msi_desc));

    if (!init_desc->is_msix)
    {
        msi_desc->affinities = (void *)msi_desc + sizeof(*msi_desc);
    }

    msi_desc->pdev = pdev;
    rt_list_init(&msi_desc->list);
    rt_list_insert_before(&pdev->msi_desc_nodes, &msi_desc->list);

    return RT_EOK;
}

rt_ssize_t rt_pci_msi_vector_count(struct rt_pci_device *pdev)
{
    rt_uint16_t msgctl;

    if (!pdev)
    {
        return -RT_EINVAL;
    }

    if (!pdev->msi_cap)
    {
        return -RT_EINVAL;
    }

    rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &msgctl);

    /* The MMC field encodes log2 of the number of supported vectors */
    return 1 << ((msgctl & PCIM_MSICTRL_MMC_MASK) >> 1);
}

rt_err_t rt_pci_msi_disable(struct rt_pci_device *pdev)
{
    if (!pdev)
    {
        return -RT_EINVAL;
    }

    if (!pdev->msi_enabled)
    {
        return -RT_EINVAL;
    }

    spin_lock(&pdev->msi_lock);

    rt_pci_msi_shutdown(pdev);
    rt_pci_msi_free_irqs(pdev);

    spin_unlock(&pdev->msi_lock);

    return RT_EOK;
}
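
/*
 * Build the single shared MSI descriptor from the capability registers:
 * 64-bit addressing, per-vector masking, and the supported vector count
 * are all read back from the MSI control word.
 */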

static rt_err_t msi_setup_msi_desc(struct rt_pci_device *pdev, int nvec)
{
    rt_uint16_t msgctl;
    struct rt_pci_msi_desc desc;

    rt_memset(&desc, 0, sizeof(desc));

    desc.vector_used = nvec;
    desc.vector_count = rt_pci_msi_vector_count(pdev);
    desc.is_msix = RT_FALSE;

    rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &msgctl);

    desc.msi.cap.is_64bit = !!(msgctl & PCIM_MSICTRL_64BIT);
    desc.msi.cap.is_masking = !!(msgctl & PCIM_MSICTRL_VECTOR);
    desc.msi.cap.multi_msg_max = (msgctl & PCIM_MSICTRL_MMC_MASK) >> 1;

    /* Use the smallest power of two that covers nvec */
    for (int log2 = 0; log2 < 5; ++log2)
    {
        if (nvec <= (1 << log2))
        {
            desc.msi.cap.multi_msg_use = log2;
            break;
        }
    }

    if (desc.msi.cap.is_64bit)
    {
        desc.msi.mask_pos = pdev->msi_cap + PCIR_MSI_MASK_64BIT;
    }
    else
    {
        desc.msi.mask_pos = pdev->msi_cap + PCIR_MSI_MASK;
    }

    /* Save pdev->irq as the default pin-assertion IRQ */
    desc.msi.default_irq = pdev->irq;

    if (desc.msi.cap.is_masking)
    {
        /* Get the old mask status */
        rt_pci_read_config_u32(pdev, desc.msi.mask_pos, &desc.msi.mask);
    }

    return msi_insert_desc(pdev, &desc);
}
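
/*
 * The MSI bring-up sequence: mask everything, create the descriptor and
 * map the vectors, verify the assigned addresses, then disable INTx and
 * flip the MSI enable bit only once the messages are in place.
 */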

static rt_ssize_t msi_capability_init(struct rt_pci_device *pdev,
        int nvec, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
    rt_err_t err;
    struct rt_pci_msi_desc *desc;

    msi_write_enable(pdev, RT_FALSE);

    spin_lock(&pdev->msi_lock);

    if (!(err = msi_setup_msi_desc(pdev, nvec)))
    {
        /* All MSIs are unmasked by default; mask them all */
        desc = rt_pci_msi_first_desc(pdev);
        msi_mask(&desc->msi, msi_multi_mask(&desc->msi), pdev);

        if (!(err = rt_pci_msi_setup_irqs(pdev, nvec, PCIY_MSI)))
        {
            err = msi_verify_entries(pdev);
        }

        if (err)
        {
            msi_unmask(&desc->msi, msi_multi_mask(&desc->msi), pdev);
        }
    }

    spin_unlock(&pdev->msi_lock);

    if (err)
    {
        rt_pci_msi_free_irqs(pdev);

        LOG_E("%s: Setup %s interrupts(%d) error = %s",
                rt_dm_dev_get_name(&pdev->parent), "MSI", nvec, rt_strerror(err));

        return err;
    }

    if (affinities)
    {
        for (int idx = 0; idx < nvec; ++idx)
        {
            msi_affinity_init(desc, idx, affinities[idx]);
        }
    }

    /* Disable INTx */
    rt_pci_intx(pdev, RT_FALSE);

    /* Set the MSI enable bit */
    msi_write_enable(pdev, RT_TRUE);

    pdev->irq = desc->irq;
    pdev->msi_enabled = RT_TRUE;

    return nvec;
}

rt_ssize_t rt_pci_msi_enable_range_affinity(struct rt_pci_device *pdev,
        int min, int max, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
    int nvec = max;
    rt_ssize_t entries_nr;

    if (!pdev || min > max)
    {
        return -RT_EINVAL;
    }

    if (pdev->no_msi)
    {
        return -RT_ENOSYS;
    }

    if (!pdev->msi_pic)
    {
        return -RT_ENOSYS;
    }

    if (pdev->msi_enabled)
    {
        LOG_W("%s: MSI is enabled", rt_dm_dev_get_name(&pdev->parent));

        return -RT_EINVAL;
    }

    entries_nr = rt_pci_msi_vector_count(pdev);

    if (entries_nr < 0)
    {
        return entries_nr;
    }

    if (nvec > entries_nr)
    {
        return -RT_EEMPTY;
    }

    return msi_capability_init(pdev, nvec, affinities);
}

rt_ssize_t rt_pci_msix_vector_count(struct rt_pci_device *pdev)
{
    rt_uint16_t msgctl;

    if (!pdev)
    {
        return -RT_EINVAL;
    }

    if (!pdev->msix_cap)
    {
        return -RT_EINVAL;
    }

    rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &msgctl);

    return rt_pci_msix_table_size(msgctl);
}

rt_err_t rt_pci_msix_disable(struct rt_pci_device *pdev)
{
    if (!pdev)
    {
        return -RT_EINVAL;
    }

    if (!pdev->msix_enabled)
    {
        return -RT_EINVAL;
    }

    spin_lock(&pdev->msi_lock);

    rt_pci_msix_shutdown(pdev);
    rt_pci_msi_free_irqs(pdev);

    spin_unlock(&pdev->msi_lock);

    return RT_EOK;
}
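
/*
 * The MSI-X table register packs a BAR indicator (BIR) into its low bits
 * and the table offset within that BAR into the rest; map just enough of
 * the BAR to cover every table entry.
 */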

static void *msix_table_remap(struct rt_pci_device *pdev, rt_size_t entries_nr)
{
    rt_uint8_t bir;
    rt_uint32_t table_offset;
    rt_ubase_t table_base_phys;

    rt_pci_read_config_u32(pdev, pdev->msix_cap + PCIR_MSIX_TABLE, &table_offset);
    bir = (rt_uint8_t)(table_offset & PCIM_MSIX_BIR_MASK);

    if (pdev->resource[bir].flags & PCI_BUS_REGION_F_NONE)
    {
        LOG_E("%s: BAR[bir = %d] is invalid", rt_dm_dev_get_name(&pdev->parent), bir);

        return RT_NULL;
    }

    table_base_phys = pdev->resource[bir].base + (table_offset & ~PCIM_MSIX_BIR_MASK);

    return rt_ioremap((void *)table_base_phys, entries_nr * PCIM_MSIX_ENTRY_SIZE);
}

static rt_err_t msix_setup_msi_descs(struct rt_pci_device *pdev,
        void *table_base, struct rt_pci_msix_entry *entries, int nvec)
{
    rt_err_t err = RT_EOK;
    struct rt_pci_msi_desc desc;

    rt_memset(&desc, 0, sizeof(desc));

    desc.vector_used = 1;
    desc.vector_count = rt_pci_msix_vector_count(pdev);
    desc.is_msix = RT_TRUE;
    desc.msix.table_base = table_base;

    for (int i = 0; i < nvec; ++i)
    {
        void *table_entry;
        int index = entries ? entries[i].index : i;

        desc.msix.index = index;
        table_entry = msix_table_base(&desc.msix);
        desc.msix.msg_ctrl = HWREG32(table_entry + PCIM_MSIX_ENTRY_VECTOR_CTRL);

        if ((err = msi_insert_desc(pdev, &desc)))
        {
            break;
        }
    }

    return err;
}
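
/*
 * Bring up MSI-X: the capability is enabled with the function-wide mask
 * held, the table is mapped and the descriptors programmed, and the
 * function mask is lifted only after every vector has been masked
 * individually.
 */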

static rt_ssize_t msix_capability_init(struct rt_pci_device *pdev,
        struct rt_pci_msix_entry *entries, int nvec,
        RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
    rt_err_t err;
    rt_uint16_t msgctl;
    rt_size_t table_size;
    void *table_base, *table_entry;
    struct rt_pci_msi_desc *desc;
    struct rt_pci_msix_entry *entry;

    /*
     * Some devices require MSI-X to be enabled before the MSI-X
     * registers can be accessed.
     * Mask all the vectors to prevent interrupts coming in before
     * they're fully set up.
     */
    msix_update_ctrl(pdev, 0, PCIM_MSIXCTRL_FUNCTION_MASK | PCIM_MSIXCTRL_MSIX_ENABLE);

    rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &msgctl);

    /* Request & Map MSI-X table region */
    table_size = rt_pci_msix_table_size(msgctl);
    table_base = msix_table_remap(pdev, table_size);

    if (!table_base)
    {
        LOG_E("%s: Remap MSI-X table fail", rt_dm_dev_get_name(&pdev->parent));

        err = -RT_ENOMEM;
        goto _out_disable_msix;
    }

    pdev->msix_base = table_base;

    spin_lock(&pdev->msi_lock);

    if (!(err = msix_setup_msi_descs(pdev, table_base, entries, nvec)))
    {
        if (!(err = rt_pci_msi_setup_irqs(pdev, nvec, PCIY_MSIX)))
        {
            /* Check if all MSI entries honor device restrictions */
            err = msi_verify_entries(pdev);
        }
    }

    spin_unlock(&pdev->msi_lock);

    if (err)
    {
        rt_pci_msi_free_irqs(pdev);

        LOG_E("%s: Setup %s interrupts(%d) error = %s",
                rt_dm_dev_get_name(&pdev->parent), "MSI-X", nvec, rt_strerror(err));

        goto _out_disable_msix;
    }

    entry = entries;

    rt_pci_msi_for_each_desc(pdev, desc)
    {
        if (affinities)
        {
            msi_affinity_init(desc, desc->msix.index, affinities[entry->index]);
        }

        entry->irq = desc->irq;
        ++entry;
    }

    /* Disable INTx */
    rt_pci_intx(pdev, RT_FALSE);

    /* Mask all table entries */
    table_entry = table_base;

    for (int i = 0; i < table_size; ++i, table_entry += PCIM_MSIX_ENTRY_SIZE)
    {
        HWREG32(table_entry + PCIM_MSIX_ENTRY_VECTOR_CTRL) = PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
    }

    msix_update_ctrl(pdev, PCIM_MSIXCTRL_FUNCTION_MASK, 0);

    pdev->msix_enabled = RT_TRUE;

    return nvec;

_out_disable_msix:
    msix_update_ctrl(pdev, PCIM_MSIXCTRL_FUNCTION_MASK | PCIM_MSIXCTRL_MSIX_ENABLE, 0);

    return err;
}
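
/*
 * Enable MSI-X for the entries the caller selected. Each rt_pci_msix_entry
 * names a table index on input and receives its mapped IRQ on output.
 *
 * A minimal usage sketch (hypothetical driver code):
 *
 *     struct rt_pci_msix_entry entries[2] = { { .index = 0 }, { .index = 1 } };
 *
 *     if (rt_pci_msix_enable_range_affinity(pdev, entries, 2, 2, RT_NULL) == 2)
 *     {
 *         // entries[0].irq and entries[1].irq are now valid
 *     }
 */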

rt_ssize_t rt_pci_msix_enable_range_affinity(struct rt_pci_device *pdev,
        struct rt_pci_msix_entry *entries, int min, int max,
        RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
    int nvec = max;
    rt_ssize_t entries_nr;

    if (!pdev || min > max)
    {
        return -RT_EINVAL;
    }

    if (pdev->no_msi)
    {
        return -RT_ENOSYS;
    }

    if (!pdev->msi_pic)
    {
        return -RT_ENOSYS;
    }

    if (pdev->msix_enabled)
    {
        LOG_W("%s: MSI-X is enabled", rt_dm_dev_get_name(&pdev->parent));

        return -RT_EINVAL;
    }

    entries_nr = rt_pci_msix_vector_count(pdev);

    if (entries_nr < 0)
    {
        return entries_nr;
    }

    if (nvec > entries_nr)
    {
        return -RT_EEMPTY;
    }

    if (!entries)
    {
        return 0;
    }

    /* Check if entries is valid */
    for (int i = 0; i < nvec; ++i)
    {
        struct rt_pci_msix_entry *target = &entries[i];

        if (target->index >= entries_nr)
        {
            return -RT_EINVAL;
        }

        for (int j = i + 1; j < nvec; ++j)
        {
            /* Check for duplicates */
            if (target->index == entries[j].index)
            {
                LOG_E("%s: msix entry[%d].index = entry[%d].index",
                        rt_dm_dev_get_name(&pdev->parent), i, j);

                return -RT_EINVAL;
            }
        }
    }

    return msix_capability_init(pdev, entries, nvec, affinities);
}