
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-09-23     GuEe-GUI     first version
 */

#define DBG_TAG "pcie.dw-host"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include "pcie-dw.h"

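/*
 * The DWC MSI controller groups its vectors into blocks of
 * MAX_MSI_IRQS_PER_CTRL interrupts. Each block has its own STATUS, MASK and
 * ENABLE registers, spaced MSI_REG_CTRL_BLOCK_SIZE bytes apart in DBI space,
 * so the handlers below first split hwirq into a block index (ctrl), a
 * register offset (res) and a bit position (bit).
 */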
static void dw_pcie_irq_ack(struct rt_pic_irq *pirq)
{
    int hwirq = pirq->hwirq;
    rt_uint32_t res, bit, ctrl;
    struct dw_pcie_port *port = pirq->pic->priv_data;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = hwirq % MAX_MSI_IRQS_PER_CTRL;

    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, RT_BIT(bit));
}

static void dw_pcie_irq_mask(struct rt_pic_irq *pirq)
{
    rt_ubase_t level;
    int hwirq = pirq->hwirq;
    rt_uint32_t res, bit, ctrl;
    struct dw_pcie_port *port = pirq->pic->priv_data;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    rt_pci_msi_mask_irq(pirq);

    level = rt_spin_lock_irqsave(&port->lock);

    ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = hwirq % MAX_MSI_IRQS_PER_CTRL;

    port->irq_mask[ctrl] |= RT_BIT(bit);
    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, port->irq_mask[ctrl]);

    rt_spin_unlock_irqrestore(&port->lock, level);
}

static void dw_pcie_irq_unmask(struct rt_pic_irq *pirq)
{
    rt_ubase_t level;
    int hwirq = pirq->hwirq;
    rt_uint32_t res, bit, ctrl;
    struct dw_pcie_port *port = pirq->pic->priv_data;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    rt_pci_msi_unmask_irq(pirq);

    level = rt_spin_lock_irqsave(&port->lock);

    ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = hwirq % MAX_MSI_IRQS_PER_CTRL;

    port->irq_mask[ctrl] &= ~RT_BIT(bit);
    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, port->irq_mask[ctrl]);

    rt_spin_unlock_irqrestore(&port->lock, level);
}

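/*
 * An MSI is an inbound memory write: the endpoint writes the vector number
 * to the doorbell address programmed into its MSI capability. Here the
 * doorbell is the coherent buffer allocated in dw_pcie_host_init()
 * (msi_data_phy), and the payload is simply the hwirq so the demux handler
 * can map the write back to a PIC vector.
 */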
static void dw_pcie_compose_msi_msg(struct rt_pic_irq *pirq, struct rt_pci_msi_msg *msg)
{
    rt_uint64_t msi_target;
    struct dw_pcie_port *port = pirq->pic->priv_data;

    msi_target = (rt_uint64_t)port->msi_data_phy;

    msg->address_lo = rt_lower_32_bits(msi_target);
    msg->address_hi = rt_upper_32_bits(msi_target);
    msg->data = pirq->hwirq;
}

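/*
 * Vector allocation is a first-fit scan of the msi_map bitmap under the
 * port lock; -RT_EEMPTY is returned once all port->irq_count vectors are in
 * use. The vectors are configured as RT_IRQ_MODE_EDGE_RISING, matching the
 * write-once, ack-to-clear behaviour of the STATUS registers.
 */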
static int dw_pcie_irq_alloc_msi(struct rt_pic *pic, struct rt_pci_msi_desc *msi_desc)
{
    rt_ubase_t level;
    int irq, hwirq;
    struct rt_pic_irq *pirq;
    struct dw_pcie_port *port = pic->priv_data;

    level = rt_spin_lock_irqsave(&port->lock);

    hwirq = rt_bitmap_next_clear_bit(port->msi_map, 0, port->irq_count);

    if (hwirq >= port->irq_count)
    {
        irq = -RT_EEMPTY;
        goto _out_lock;
    }

    pirq = rt_pic_find_irq(pic, hwirq);
    irq = rt_pic_config_irq(pic, hwirq, hwirq);
    pirq->mode = RT_IRQ_MODE_EDGE_RISING;

    rt_bitmap_set_bit(port->msi_map, hwirq);

_out_lock:
    rt_spin_unlock_irqrestore(&port->lock, level);

    return irq;
}

static void dw_pcie_irq_free_msi(struct rt_pic *pic, int irq)
{
    rt_ubase_t level;
    struct rt_pic_irq *pirq;
    struct dw_pcie_port *port = pic->priv_data;

    pirq = rt_pic_find_pirq(pic, irq);

    if (!pirq)
    {
        return;
    }

    level = rt_spin_lock_irqsave(&port->lock);
    rt_bitmap_clear_bit(port->msi_map, pirq->hwirq);
    rt_spin_unlock_irqrestore(&port->lock, level);
}

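/*
 * PIC callbacks backing the MSI domain of this host bridge. Note:
 * RT_PIC_F_IRQ_ROUTING appears to mark a controller whose vectors are
 * routed through a parent interrupt (the chained msi_irq) rather than
 * wired directly to CPU interrupt lines.
 */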
static const struct rt_pic_ops dw_pci_msi_ops =
{
    .name = "DWPCI-MSI",
    .irq_ack = dw_pcie_irq_ack,
    .irq_mask = dw_pcie_irq_mask,
    .irq_unmask = dw_pcie_irq_unmask,
    .irq_compose_msi_msg = dw_pcie_compose_msi_msg,
    .irq_alloc_msi = dw_pcie_irq_alloc_msi,
    .irq_free_msi = dw_pcie_irq_free_msi,
    .flags = RT_PIC_F_IRQ_ROUTING,
};

/* MSI interrupt handler */
rt_err_t dw_handle_msi_irq(struct dw_pcie_port *port)
{
    rt_err_t err;
    int i, pos;
    rt_bitmap_t status;
    rt_uint32_t num_ctrls;
    struct rt_pic_irq *pirq;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);
    struct rt_pic *msi_pic = port->msi_pic;

    err = -RT_EEMPTY;
    num_ctrls = RT_DIV_ROUND_UP(port->irq_count, MAX_MSI_IRQS_PER_CTRL);

    for (i = 0; i < num_ctrls; ++i)
    {
        status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
                (i * MSI_REG_CTRL_BLOCK_SIZE));

        if (!status)
        {
            continue;
        }

        err = RT_EOK;

        rt_bitmap_for_each_set_bit(&status, pos, MAX_MSI_IRQS_PER_CTRL)
        {
            pirq = rt_pic_find_irq(msi_pic, pos + i * MAX_MSI_IRQS_PER_CTRL);

            dw_pcie_irq_ack(pirq);
            rt_pic_handle_isr(pirq);
        }
    }

    return err;
}

static void dw_pcie_msi_isr(int irqno, void *param)
{
    struct dw_pcie_port *port = param;

    dw_handle_msi_irq(port);
}

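/*
 * Teardown order matters here: mask and detach the chained line interrupt
 * first so no MSI demux can run, then release the doorbell buffer.
 */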
void dw_pcie_free_msi(struct dw_pcie_port *port)
{
    if (port->msi_irq >= 0)
    {
        rt_hw_interrupt_mask(port->msi_irq);
        rt_pic_detach_irq(port->msi_irq, port);
    }

    if (port->msi_data)
    {
        struct dw_pcie *pci = to_dw_pcie_from_port(port);

        rt_dma_free_coherent(pci->dev, sizeof(rt_uint64_t), port->msi_data,
                port->msi_data_phy);
        port->msi_data = RT_NULL;
    }
}

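/*
 * (Re)program the doorbell address into the controller. The controller is
 * expected to claim inbound writes to this address itself, so the coherent
 * buffer mainly provides a safe, device-reachable address rather than
 * memory that is ever read back.
 */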
void dw_pcie_msi_init(struct dw_pcie_port *port)
{
#ifdef RT_PCI_MSI
    struct dw_pcie *pci = to_dw_pcie_from_port(port);
    rt_uint64_t msi_target = (rt_uint64_t)port->msi_data_phy;

    /* Program the msi_data_phy */
    dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, rt_lower_32_bits(msi_target));
    dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, rt_upper_32_bits(msi_target));
#endif
}

static const struct rt_pci_ops dw_child_pcie_ops;
static const struct rt_pci_ops dw_pcie_ops;

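/*
 * Bring up one DWC root complex: map the "config" region, build and probe
 * the host bridge, read the optional "num-viewport" and "max-link-speed"
 * OFW properties, and (unless the SoC glue supplies msi_host_init) create
 * the MSI PIC, chain its line interrupt and allocate the doorbell buffer.
 * All error paths unwind in reverse order through the _err_* labels.
 */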
rt_err_t dw_pcie_host_init(struct dw_pcie_port *port)
{
    rt_err_t err;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);
    struct rt_device *dev = pci->dev;
    struct rt_pci_host_bridge *bridge;

    rt_spin_lock_init(&port->lock);

    rt_dm_dev_get_address_by_name(dev, "config", &port->cfg0_addr, &port->cfg0_size);

    if (port->cfg0_addr)
    {
        port->cfg0_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);

        if (!port->cfg0_base)
        {
            return -RT_EIO;
        }
    }
    else if (!port->cfg0_base)
    {
        LOG_E("Missing 'config' reg space");
    }

    if (!(bridge = rt_pci_host_bridge_alloc(0)))
    {
        return -RT_ENOMEM;
    }

    bridge->parent.ofw_node = dev->ofw_node;

    if ((err = rt_pci_host_bridge_init(bridge)))
    {
        goto _err_free_bridge;
    }

    port->bridge = bridge;

    for (int i = 0; i < bridge->bus_regions_nr; ++i)
    {
        struct rt_pci_bus_region *region = &bridge->bus_regions[i];

        switch (region->flags)
        {
        case PCI_BUS_REGION_F_IO:
            port->io_addr = region->cpu_addr;
            port->io_bus_addr = region->phy_addr;
            port->io_size = region->size;
            break;

        case PCI_BUS_REGION_F_NONE:
            port->cfg0_size = region->size;
            port->cfg0_addr = region->cpu_addr;

            if (!pci->dbi_base)
            {
                pci->dbi_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);

                if (!pci->dbi_base)
                {
                    LOG_E("ioremap DBI space failed");
                    return -RT_ENOMEM;
                }
            }
            break;

        default:
            break;
        }
    }

    if (!port->cfg0_base && port->cfg0_addr)
    {
        port->cfg0_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);

        if (!port->cfg0_base)
        {
            return -RT_ENOMEM;
        }
    }

    if (rt_dm_dev_prop_read_u32(dev, "num-viewport", &pci->num_viewport))
    {
        pci->num_viewport = 2;
    }

    if (pci->link_gen < 1)
    {
        pci->link_gen = -1;
        rt_dm_dev_prop_read_u32(dev, "max-link-speed", &pci->link_gen);
    }

    /*
     * If a specific SoC driver needs to change the default number of vectors,
     * it needs to implement the set_irq_count callback.
     */
    if (!port->ops->set_irq_count)
    {
        port->irq_count = MSI_DEF_NUM_VECTORS;
    }
    else
    {
        port->ops->set_irq_count(port);

        if (port->irq_count > MAX_MSI_IRQS || port->irq_count == 0)
        {
            LOG_E("Invalid IRQ count = %d", port->irq_count);

            err = -RT_EINVAL;
            goto _err_free_cfg;
        }
    }

    if (!port->ops->msi_host_init)
    {
        port->msi_pic = rt_calloc(1, sizeof(*port->msi_pic));

        if (!port->msi_pic)
        {
            err = -RT_ENOMEM;
            goto _err_free_cfg;
        }

        port->msi_pic->priv_data = port;
        port->msi_pic->ops = &dw_pci_msi_ops;
        rt_pic_linear_irq(port->msi_pic, port->irq_count);
        rt_pic_user_extends(port->msi_pic);

        if (port->msi_irq)
        {
            rt_hw_interrupt_install(port->msi_irq, dw_pcie_msi_isr, port, "dwc-pci-msi");
            rt_hw_interrupt_umask(port->msi_irq);
        }

        port->msi_data = rt_dma_alloc_coherent(pci->dev, sizeof(rt_uint64_t),
                &port->msi_data_phy);

        if (!port->msi_data)
        {
            err = -RT_ENOMEM;
            goto _err_free_msi;
        }
    }
    else
    {
        if ((err = port->ops->msi_host_init(port)))
        {
            return err;
        }
    }

    /* Set default bus ops */
    bridge->ops = &dw_pcie_ops;
    bridge->child_ops = &dw_child_pcie_ops;

    if (port->ops->host_init && (err = port->ops->host_init(port)))
    {
        goto _err_free_msi;
    }

    bridge->sysdata = port;

    if ((err = rt_pci_host_bridge_probe(bridge)))
    {
        goto _err_free_msi;
    }

    return RT_EOK;

_err_free_msi:
    if (!port->ops->msi_host_init)
    {
        dw_pcie_free_msi(port);

        rt_pic_cancel_irq(port->msi_pic);
        rt_free(port->msi_pic);
        port->msi_pic = RT_NULL;
    }

_err_free_bridge:
    rt_pci_host_bridge_free(bridge);
    port->bridge = RT_NULL;

_err_free_cfg:
    if (port->cfg0_base)
    {
        rt_iounmap(port->cfg0_base);
        port->cfg0_base = RT_NULL;
    }

    return err;
}

void dw_pcie_host_deinit(struct dw_pcie_port *port)
{
    if (!port->ops->msi_host_init)
    {
        dw_pcie_free_msi(port);
    }
}

void dw_pcie_host_free(struct dw_pcie_port *port)
{
    if (!port->ops->msi_host_init)
    {
        dw_pcie_free_msi(port);

        rt_pic_cancel_irq(port->msi_pic);
        rt_free(port->msi_pic);
    }

    if (port->bridge)
    {
        rt_pci_host_bridge_free(port->bridge);
    }
}

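/*
 * Child (non-root) buses have no dedicated config window per device.
 * Instead, outbound ATU 0 is retargeted on every access: CFG0 type for
 * devices on the bus directly below the root port, CFG1 for anything
 * further down the hierarchy.
 */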
static void *dw_pcie_other_conf_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg)
{
    int type;
    rt_uint32_t busdev;
    struct dw_pcie_port *port = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    /*
     * Checking whether the link is up here is a last line of defense
     * against platforms that forward errors on the system bus as
     * SError upon PCI configuration transactions issued when the link is down.
     * This check is racy by definition and does not stop the system from
     * triggering an SError if the link goes down after this check is performed.
     */
    if (!dw_pcie_link_up(pci))
    {
        return RT_NULL;
    }

    busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(RT_PCI_SLOT(devfn)) |
            PCIE_ATU_FUNC(RT_PCI_FUNC(devfn));

    if (rt_pci_is_root_bus(bus->parent))
    {
        type = PCIE_ATU_TYPE_CFG0;
    }
    else
    {
        type = PCIE_ATU_TYPE_CFG1;
    }

    dw_pcie_prog_outbound_atu(pci, 0, type, port->cfg0_addr, busdev, port->cfg0_size);

    return port->cfg0_base + reg;
}

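/*
 * When the last ATU viewport is shared between IO and CFG
 * (DWC_IATU_IOCFG_SHARED), each config access above has just stolen that
 * viewport, so the IO window must be restored before returning.
 */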
static rt_err_t dw_pcie_other_read_conf(struct rt_pci_bus *bus,
        rt_uint32_t devfn, int reg, int width, rt_uint32_t *value)
{
    rt_err_t err;
    struct dw_pcie_port *port = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    err = rt_pci_bus_read_config_uxx(bus, devfn, reg, width, value);

    if (!err && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
    {
        dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
                port->io_addr, port->io_bus_addr, port->io_size);
    }

    return err;
}

static rt_err_t dw_pcie_other_write_conf(struct rt_pci_bus *bus,
        rt_uint32_t devfn, int reg, int width, rt_uint32_t value)
{
    rt_err_t err;
    struct dw_pcie_port *port = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    err = rt_pci_bus_write_config_uxx(bus, devfn, reg, width, value);

    if (!err && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
    {
        dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
                port->io_addr, port->io_bus_addr, port->io_size);
    }

    return err;
}

static const struct rt_pci_ops dw_child_pcie_ops =
{
    .map = dw_pcie_other_conf_map,
    .read = dw_pcie_other_read_conf,
    .write = dw_pcie_other_write_conf,
};

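/*
 * The root bus needs no ATU at all: its config space is the DBI register
 * file itself, and only device 0 (the root port) exists on it.
 */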
void *dw_pcie_own_conf_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg)
{
    struct dw_pcie_port *port = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    if (RT_PCI_SLOT(devfn) > 0)
    {
        return RT_NULL;
    }

    return pci->dbi_base + reg;
}

static const struct rt_pci_ops dw_pcie_ops =
{
    .map = dw_pcie_own_conf_map,
    .read = rt_pci_bus_read_config_uxx,
    .write = rt_pci_bus_write_config_uxx,
};

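/*
 * One-time root-complex setup: unlock the read-only DBI registers, mask and
 * enable all MSI blocks, program the RC BARs, bus numbers and command
 * register, pre-program the outbound ATU windows for memory and IO (unless
 * the platform supplies its own child config ops), and finally request a
 * link speed change before locking the DBI registers again.
 */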
void dw_pcie_setup_rc(struct dw_pcie_port *port)
{
    rt_uint32_t val, num_ctrls;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    /*
     * Enable DBI read-only registers for writing/updating configuration.
     * Write permission gets disabled towards the end of this function.
     */
    dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);

    dw_pcie_setup(pci);

    if (!port->ops->msi_host_init)
    {
        num_ctrls = RT_DIV_ROUND_UP(port->irq_count, MAX_MSI_IRQS_PER_CTRL);

        /* Initialize IRQ Status array */
        for (int ctrl = 0; ctrl < num_ctrls; ++ctrl)
        {
            port->irq_mask[ctrl] = ~0;
            dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
                    (ctrl * MSI_REG_CTRL_BLOCK_SIZE), port->irq_mask[ctrl]);
            dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
                    (ctrl * MSI_REG_CTRL_BLOCK_SIZE), ~0);
        }
    }

    /* Setup RC BARs */
    dw_pcie_writel_dbi(pci, PCIR_BAR(0), PCIM_BAR_MEM_TYPE_64);
    dw_pcie_writel_dbi(pci, PCIR_BAR(1), PCIM_BAR_MEM_TYPE_32);

    /* Setup interrupt pins */
    val = dw_pcie_readl_dbi(pci, PCIR_INTLINE);
    val &= 0xffff00ff;
    val |= 0x00000100;
    dw_pcie_writel_dbi(pci, PCIR_INTLINE, val);

    /* Setup bus numbers */
    val = dw_pcie_readl_dbi(pci, PCIR_PRIBUS_1);
    val &= 0xff000000;
    val |= 0x00ff0100;
    dw_pcie_writel_dbi(pci, PCIR_PRIBUS_1, val);

    /* Setup command register */
    val = dw_pcie_readl_dbi(pci, PCIR_COMMAND);
    val &= 0xffff0000;
    val |= PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_SERRESPEN;
    dw_pcie_writel_dbi(pci, PCIR_COMMAND, val);

    /*
     * If the platform provides its own child bus config accesses, it means
     * the platform uses its own address translation component rather than
     * ATU, so we should not program the ATU here.
     */
    if (pci->port.bridge->child_ops == &dw_child_pcie_ops)
    {
        int atu_idx = 0;
        struct rt_pci_host_bridge *bridge = port->bridge;

        /*
         * Program an outbound ATU entry for each memory region, keeping one
         * viewport in reserve for IO/CFG accesses.
         */
        for (int i = 0; i < bridge->bus_regions_nr; ++i)
        {
            struct rt_pci_bus_region *region = &bridge->bus_regions[i];

            if (region->flags != PCI_BUS_REGION_F_MEM)
            {
                continue;
            }

            if (pci->num_viewport <= ++atu_idx)
            {
                break;
            }

            dw_pcie_prog_outbound_atu(pci, atu_idx,
                    PCIE_ATU_TYPE_MEM, region->cpu_addr,
                    region->phy_addr, region->size);
        }

        if (port->io_size)
        {
            if (pci->num_viewport > ++atu_idx)
            {
                dw_pcie_prog_outbound_atu(pci, atu_idx,
                        PCIE_ATU_TYPE_IO, port->io_addr,
                        port->io_bus_addr, port->io_size);
            }
            else
            {
                pci->iatu_unroll_enabled |= DWC_IATU_IOCFG_SHARED;
            }
        }

        if (pci->num_viewport <= atu_idx)
        {
            LOG_W("Resources exceed number of ATU entries (%d)", pci->num_viewport);
        }
    }

    dw_pcie_writel_dbi(pci, PCIR_BAR(0), 0);

    /* Program correct class for RC */
    dw_pcie_writew_dbi(pci, PCIR_SUBCLASS, PCIS_BRIDGE_PCI);

    val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
    val |= PORT_LOGIC_SPEED_CHANGE;
    dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

    dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
}