/* netdev_hpmicro.c — HPMicro ENET low-level network device glue for the EtherCAT master */
/*
 * Copyright (c) 2025, sakumisu
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include "hpm_clock_drv.h"
#include "hpm_enet_drv.h"
#include "hpm_otp_drv.h"
#include "hpm_l1c_drv.h"
#include "board.h"
#include "ec_master.h"

/* Exactly one MII flavour must be selected at build time. */
#if !defined(RMII) && !defined(RGMII)
#error "Please define RMII or RGMII in ec_config.h to choose the ENET interface type"
#endif

/* Bind the interface type and the controller instance to the chosen flavour. */
#if defined(RGMII)
#define ENET_INF_TYPE enet_inf_rgmii
#define ENET BOARD_ENET_RGMII
#else
#define ENET_INF_TYPE enet_inf_rmii
#define ENET BOARD_ENET_RMII
#endif

/* Non-zero: receive frames from the ENET interrupt instead of polling. */
#define __ENABLE_ENET_RECEIVE_INTERRUPT 1

/* Fallback station MAC address, used when no valid UUID is found in OTP. */
#define MAC_ADDR0 0x00
#define MAC_ADDR1 0x80
#define MAC_ADDR2 0xE1
#define MAC_ADDR3 0x00
#define MAC_ADDR4 0x00
#define MAC_ADDR5 0x00

/* Ring sizes come from the EtherCAT master configuration; each buffer holds
 * one maximum-size Ethernet frame. */
#define ENET_TX_BUFF_COUNT CONFIG_EC_MAX_ENET_TXBUF_COUNT
#define ENET_RX_BUFF_COUNT CONFIG_EC_MAX_ENET_RXBUF_COUNT
#define ENET_RX_BUFF_SIZE ENET_MAX_FRAME_SIZE
#define ENET_TX_BUFF_SIZE ENET_MAX_FRAME_SIZE
/* DMA descriptor rings are placed in non-cacheable RAM (see the placement
 * attribute) so the MAC and the CPU observe consistent descriptor state. */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(ENET_SOC_DESC_ADDR_ALIGNMENT)
__RW enet_rx_desc_t dma_rx_desc_tab[ENET_RX_BUFF_COUNT]; /* Ethernet Rx DMA Descriptor */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(ENET_SOC_DESC_ADDR_ALIGNMENT)
__RW enet_tx_desc_t dma_tx_desc_tab[ENET_TX_BUFF_COUNT]; /* Ethernet Tx DMA Descriptor */

/* Frame payload buffers live in fast RAM, aligned as the ENET DMA requires. */
ATTR_PLACE_AT_FAST_RAM_WITH_ALIGNMENT(ENET_SOC_BUFF_ADDR_ALIGNMENT)
__RW uint8_t rx_buff[ENET_RX_BUFF_COUNT][ENET_RX_BUFF_SIZE]; /* Ethernet Receive Buffer */
ATTR_PLACE_AT_FAST_RAM_WITH_ALIGNMENT(ENET_SOC_BUFF_ADDR_ALIGNMENT)
__RW uint8_t tx_buff[ENET_TX_BUFF_COUNT][ENET_TX_BUFF_SIZE]; /* Ethernet Transmit Buffer */

enet_desc_t desc;      /* aggregate descriptor/buffer setup handed to the enet driver */
uint8_t mac[ETH_ALEN]; /* station MAC address, filled by enet_get_mac_address() */
ec_netdev_t g_netdev;  /* the single netdev instance exposed to the EtherCAT master */
  44. ATTR_WEAK void enet_get_mac_address(uint8_t *mac)
  45. {
  46. bool invalid = true;
  47. uint32_t uuid[(ETH_ALEN + (ETH_ALEN - 1)) / sizeof(uint32_t)];
  48. for (int i = 0; i < ARRAY_SIZE(uuid); i++) {
  49. uuid[i] = otp_read_from_shadow(OTP_SOC_UUID_IDX + i);
  50. if (uuid[i] != 0xFFFFFFFFUL && uuid[i] != 0) {
  51. invalid = false;
  52. }
  53. }
  54. if (invalid == true) {
  55. ec_memcpy(mac, &uuid, ETH_ALEN);
  56. } else {
  57. mac[0] = MAC_ADDR0;
  58. mac[1] = MAC_ADDR1;
  59. mac[2] = MAC_ADDR2;
  60. mac[3] = MAC_ADDR3;
  61. mac[4] = MAC_ADDR4;
  62. mac[5] = MAC_ADDR5;
  63. }
  64. }
  65. hpm_stat_t enet_init(ENET_Type *ptr)
  66. {
  67. enet_int_config_t int_config = { .int_enable = 0, .int_mask = 0 };
  68. enet_mac_config_t enet_config;
  69. enet_tx_control_config_t enet_tx_control_config;
  70. /* Initialize td, rd and the corresponding buffers */
  71. memset((uint8_t *)dma_tx_desc_tab, 0x00, sizeof(dma_tx_desc_tab));
  72. memset((uint8_t *)dma_rx_desc_tab, 0x00, sizeof(dma_rx_desc_tab));
  73. memset((uint8_t *)rx_buff, 0x00, sizeof(rx_buff));
  74. memset((uint8_t *)tx_buff, 0x00, sizeof(tx_buff));
  75. desc.tx_desc_list_head = (enet_tx_desc_t *)core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t)dma_tx_desc_tab);
  76. desc.rx_desc_list_head = (enet_rx_desc_t *)core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t)dma_rx_desc_tab);
  77. desc.tx_buff_cfg.buffer = core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t)tx_buff);
  78. desc.tx_buff_cfg.count = ENET_TX_BUFF_COUNT;
  79. desc.tx_buff_cfg.size = ENET_TX_BUFF_SIZE;
  80. desc.rx_buff_cfg.buffer = core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t)rx_buff);
  81. desc.rx_buff_cfg.count = ENET_RX_BUFF_COUNT;
  82. desc.rx_buff_cfg.size = ENET_RX_BUFF_SIZE;
  83. /*Get a default control config for tx descriptor */
  84. enet_get_default_tx_control_config(ENET, &enet_tx_control_config);
  85. /* Set the control config for tx descriptor */
  86. ec_memcpy(&desc.tx_control_config, &enet_tx_control_config, sizeof(enet_tx_control_config_t));
  87. /* Get MAC address */
  88. enet_get_mac_address(mac);
  89. /* Set MAC0 address */
  90. enet_config.mac_addr_high[0] = mac[5] << 8 | mac[4];
  91. enet_config.mac_addr_low[0] = mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0];
  92. enet_config.valid_max_count = 1;
  93. /* Set DMA PBL */
  94. enet_config.dma_pbl = board_get_enet_dma_pbl(ENET);
  95. /* Set SARC */
  96. enet_config.sarc = enet_sarc_replace_mac0;
  97. #if defined(__ENABLE_ENET_RECEIVE_INTERRUPT) && __ENABLE_ENET_RECEIVE_INTERRUPT
  98. /* Enable Enet IRQ */
  99. board_enable_enet_irq(ENET);
  100. /* Get the default interrupt config */
  101. enet_get_default_interrupt_config(ENET, &int_config);
  102. #endif
  103. /* Initialize enet controller */
  104. if (enet_controller_init(ptr, ENET_INF_TYPE, &desc, &enet_config, &int_config) != status_success) {
  105. return status_fail;
  106. }
  107. #if defined(__ENABLE_ENET_RECEIVE_INTERRUPT) && __ENABLE_ENET_RECEIVE_INTERRUPT
  108. /* Disable LPI interrupt */
  109. enet_disable_lpi_interrupt(ENET);
  110. #endif
  111. return status_success;
  112. }
  113. ec_netdev_t *ec_netdev_low_level_init(uint8_t netdev_index)
  114. {
  115. /* Initialize GPIOs */
  116. board_init_enet_pins(ENET);
  117. /* Reset an enet PHY */
  118. board_reset_enet_phy(ENET);
  119. #if defined(RGMII)
  120. /* Set RGMII clock delay */
  121. board_init_enet_rgmii_clock_delay(ENET);
  122. #else
  123. /* Set RMII reference clock */
  124. board_init_enet_rmii_reference_clock(ENET, BOARD_ENET_RMII_INT_REF_CLK);
  125. EC_LOG_DBG("Reference Clock: %s\n", BOARD_ENET_RMII_INT_REF_CLK ? "Internal Clock" : "External Clock");
  126. #endif
  127. /* Initialize MAC and DMA */
  128. if (enet_init(ENET) == 0) {
  129. } else {
  130. EC_LOG_DBG("Enet initialization fails !!!\n");
  131. while (1) {
  132. }
  133. }
  134. ec_memcpy(g_netdev.mac_addr, mac, ETH_ALEN);
  135. for (uint32_t i = 0; i < ENET_TX_BUFF_COUNT; i++) {
  136. for (uint8_t j = 0; j < 6; j++) { // dst MAC
  137. EC_WRITE_U8(&tx_buff[i][j], 0xFF);
  138. }
  139. for (uint8_t j = 0; j < 6; j++) { // src MAC
  140. EC_WRITE_U8(&tx_buff[i][6 + j], mac[j]);
  141. }
  142. EC_WRITE_U16(&tx_buff[i][12], ec_htons(0x88a4));
  143. }
  144. return &g_netdev;
  145. }
  146. void ec_mdio_low_level_write(struct chry_phy_device *phydev, uint16_t phy_addr, uint16_t regnum, uint16_t val)
  147. {
  148. //ec_netdev_t *netdev = (ec_netdev_t *)phydev->user_data;
  149. enet_write_phy(ENET, phy_addr, regnum, val);
  150. }
  151. uint16_t ec_mdio_low_level_read(struct chry_phy_device *phydev, uint16_t phy_addr, uint16_t regnum)
  152. {
  153. //ec_netdev_t *netdev = (ec_netdev_t *)phydev->user_data;
  154. return enet_read_phy(ENET, phy_addr, regnum);
  155. }
  156. void ec_netdev_low_level_link_up(ec_netdev_t *netdev, struct chry_phy_status *status)
  157. {
  158. enet_line_speed_t line_speed = enet_line_speed_10mbps;
  159. switch (status->speed) {
  160. case 10:
  161. line_speed = enet_line_speed_10mbps;
  162. break;
  163. case 100:
  164. line_speed = enet_line_speed_100mbps;
  165. break;
  166. case 1000:
  167. line_speed = enet_line_speed_1000mbps;
  168. break;
  169. default:
  170. break;
  171. }
  172. if (status->link) {
  173. enet_set_line_speed(ENET, line_speed);
  174. enet_set_duplex_mode(ENET, status->duplex);
  175. } else {
  176. }
  177. }
  178. EC_FAST_CODE_SECTION uint8_t *ec_netdev_low_level_get_txbuf(ec_netdev_t *netdev)
  179. {
  180. __IO enet_tx_desc_t *dma_tx_desc;
  181. dma_tx_desc = desc.tx_desc_list_cur;
  182. EC_ASSERT_MSG(dma_tx_desc->tdes0_bm.own == 0, "No free tx buffer available\n");
  183. return (uint8_t *)sys_address_to_core_local_mem(BOARD_RUNNING_CORE, dma_tx_desc->tdes2_bm.buffer1);
  184. }
  185. EC_FAST_CODE_SECTION int ec_netdev_low_level_output(ec_netdev_t *netdev, uint32_t size)
  186. {
  187. __IO enet_tx_desc_t *dma_tx_desc;
  188. dma_tx_desc = desc.tx_desc_list_cur;
  189. if (dma_tx_desc->tdes0_bm.own != 0) {
  190. return -1;
  191. }
  192. /* Prepare transmit descriptors to give to DMA*/
  193. enet_prepare_transmission_descriptors(ENET, &desc.tx_desc_list_cur, size + 4, desc.tx_buff_cfg.size);
  194. return 0;
  195. }
/* Poll one completed frame from the Rx DMA ring and hand it to the stack.
 * Returns 0 when a frame was consumed, -1 when no complete frame is pending.
 * Always resumes the Rx DMA before returning. */
EC_FAST_CODE_SECTION int ec_netdev_low_level_input(ec_netdev_t *netdev)
{
    uint32_t len;
    uint8_t *buffer;
    enet_frame_t frame = { 0, 0, 0 };
    enet_rx_desc_t *dma_rx_desc;
    uint32_t i = 0;
    int ret = 0;
    /* Check and get a received frame */
    if (enet_check_received_frame(&desc.rx_desc_list_cur, &desc.rx_frame_info) == 1) {
        frame = enet_get_received_frame(&desc.rx_desc_list_cur, &desc.rx_frame_info);
    }
    /* Obtain the size of the packet and put it into the "len" variable. */
    len = frame.length;
    /* Translate the DMA (system) address into a core-local pointer. When no
     * frame arrived, frame.buffer is 0 — buffer is then never dereferenced
     * because len stays 0. */
    buffer = (uint8_t *)sys_address_to_core_local_mem(BOARD_RUNNING_CORE, (uint32_t)frame.buffer);
    if (len > 0) {
        ec_netdev_receive(netdev, buffer, len);
        /* Release descriptors to DMA */
        dma_rx_desc = frame.rx_desc;
        /* Set Own bit in Rx descriptors: gives the buffers back to DMA */
        for (i = 0; i < desc.rx_frame_info.seg_count; i++) {
            dma_rx_desc->rdes0_bm.own = 1;
            dma_rx_desc = (enet_rx_desc_t *)(dma_rx_desc->rdes3_bm.next_desc);
        }
        /* Clear Segment_Count */
        desc.rx_frame_info.seg_count = 0;
    } else {
        ret = -1;
    }
    /* Resume Rx Process */
    enet_rx_resume(ENET);
    return ret;
}
  229. #if defined(__ENABLE_ENET_RECEIVE_INTERRUPT) && __ENABLE_ENET_RECEIVE_INTERRUPT
/* Shared ENET interrupt service routine: acknowledges the pending interrupt
 * sources and drains all completed Rx frames into the EtherCAT stack.
 * Several status registers here are clear-on-read — the bare reads below
 * are intentional side effects, not dead code. */
void isr_enet(ENET_Type *ptr)
{
    uint32_t status;
    uint32_t rxgbfrmis;
    uint32_t intr_status;
    status = ptr->DMA_STATUS;
    rxgbfrmis = ptr->MMC_INTR_RX;
    intr_status = ptr->INTR_STATUS;
    if (ENET_DMA_STATUS_GLPII_GET(status)) {
        /* read LPI_CSR to clear interrupt status */
        ptr->LPI_CSR;
    }
    if (ENET_INTR_STATUS_RGSMIIIS_GET(intr_status)) {
        /* read XMII_CSR to clear interrupt status */
        ptr->XMII_CSR;
    }
    if (ENET_DMA_STATUS_RI_GET(status)) {
        /* acknowledge the receive interrupt, then drain every pending frame */
        ptr->DMA_STATUS |= ENET_DMA_STATUS_RI_MASK;
        while (ec_netdev_low_level_input(&g_netdev) == 0) {
        }
    }
    if (ENET_MMC_INTR_RX_RXCTRLFIS_GET(rxgbfrmis)) {
        /* reading the counter clears the MMC Rx control-frame interrupt */
        ptr->RXFRAMECOUNT_GB;
    }
}
#ifdef HPM_ENET0_BASE
/* ENET0 interrupt vector: forwards to the shared handler for the
 * board-selected controller. */
void isr_enet0(void)
{
    isr_enet(ENET);
}
SDK_DECLARE_EXT_ISR_M(IRQn_ENET0, isr_enet0)
#endif
#ifdef HPM_ENET1_BASE
/* ENET1 interrupt vector.
 * NOTE(review): like isr_enet0, this forwards ENET (the board-selected
 * controller), not the ENET1 base — if both controllers were ever active
 * simultaneously this would service the wrong instance; confirm intended. */
void isr_enet1(void)
{
    isr_enet(ENET);
}
SDK_DECLARE_EXT_ISR_M(IRQn_ENET1, isr_enet1)
#endif
  269. #endif
#include "hpm_gptmr_drv.h"

/* General-purpose timer used as the EtherCAT master's periodic tick source. */
#define EC_HTIMER BOARD_GPTMR
#define EC_HTIMER_CH BOARD_GPTMR_CHANNEL
#define EC_HTIMER_IRQ BOARD_GPTMR_IRQ
#define EC_HTIMER_CLK_NAME BOARD_GPTMR_CLK_NAME

static ec_htimer_cb g_ec_htimer_cb = NULL;  /* user callback invoked from the timer ISR */
static void *g_ec_htimer_arg = NULL;        /* opaque argument passed to the callback */
static uint32_t g_timer_reload_us_div = 0;  /* timer ticks per microsecond, set in ec_htimer_start() */
  278. void ec_htimer_isr(void)
  279. {
  280. if (gptmr_check_status(EC_HTIMER, GPTMR_CH_RLD_STAT_MASK(EC_HTIMER_CH))) {
  281. gptmr_clear_status(EC_HTIMER, GPTMR_CH_RLD_STAT_MASK(EC_HTIMER_CH));
  282. g_ec_htimer_cb(g_ec_htimer_arg);
  283. }
  284. }
  285. SDK_DECLARE_EXT_ISR_M(EC_HTIMER_IRQ, ec_htimer_isr);
  286. void ec_htimer_start(uint32_t us, ec_htimer_cb cb, void *arg)
  287. {
  288. uint32_t gptmr_freq;
  289. gptmr_channel_config_t config;
  290. g_ec_htimer_cb = cb;
  291. g_ec_htimer_arg = arg;
  292. gptmr_channel_get_default_config(EC_HTIMER, &config);
  293. clock_add_to_group(EC_HTIMER_CLK_NAME, 0);
  294. gptmr_freq = clock_get_frequency(EC_HTIMER_CLK_NAME);
  295. g_timer_reload_us_div = gptmr_freq / 1000000;
  296. config.reload = g_timer_reload_us_div * us;
  297. gptmr_stop_counter(EC_HTIMER, EC_HTIMER_CH);
  298. gptmr_channel_config(EC_HTIMER, EC_HTIMER_CH, &config, false);
  299. gptmr_enable_irq(EC_HTIMER, GPTMR_CH_RLD_IRQ_MASK(EC_HTIMER_CH));
  300. intc_m_enable_irq_with_priority(EC_HTIMER_IRQ, 10);
  301. gptmr_channel_reset_count(EC_HTIMER, EC_HTIMER_CH);
  302. gptmr_start_counter(EC_HTIMER, EC_HTIMER_CH);
  303. }
/* Stop the housekeeping timer: halt the counter, then mask its interrupt at
 * both the timer and the interrupt-controller level. */
void ec_htimer_stop(void)
{
    gptmr_stop_counter(EC_HTIMER, EC_HTIMER_CH);
    gptmr_disable_irq(EC_HTIMER, GPTMR_CH_RLD_IRQ_MASK(EC_HTIMER_CH));
    intc_m_disable_irq(EC_HTIMER_IRQ);
}
/* Re-arm the timer period to `us` microseconds using the tick rate cached by
 * ec_htimer_start().
 * NOTE(review): us * g_timer_reload_us_div can overflow 32 bits for very
 * large periods — confirm the callers' maximum interval. */
EC_FAST_CODE_SECTION void ec_htimer_update(uint32_t us)
{
    gptmr_channel_config_update_reload(EC_HTIMER, EC_HTIMER_CH, us * g_timer_reload_us_div);
}
#ifndef CONFIG_EC_TIMESTAMP_CUSTOM
/* Default timestamp clock source: the CPU0 core frequency in Hz. Compiled
 * out when the application provides a custom timestamp implementation. */
uint32_t ec_get_cpu_frequency(void)
{
    return clock_get_frequency(clock_cpu0);
}
#endif
  319. #endif