drv_eth.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2017-10-10 Tanek the first version
  9. * 2019-5-10 misonyo add DMA TX and RX function
  10. * 2020-10-14 wangqiang use phy device in phy monitor thread
  11. * 2022-08-29 xjy198903 add 1170 rgmii support
  12. */
  13. #include <rtthread.h>
  14. #include "board.h"
  15. #include <rtdevice.h>
  16. #ifdef RT_USING_FINSH
  17. #include <finsh.h>
  18. #endif
  19. #include "fsl_enet.h"
  20. #include "fsl_gpio.h"
  21. #include "fsl_cache.h"
  22. #include "fsl_iomuxc.h"
  23. #include "fsl_common.h"
  24. #ifdef RT_USING_LWIP
  25. #include <netif/ethernetif.h>
  26. #include "lwipopts.h"
  27. #define ENET_RXBD_NUM (5)
  28. #define ENET_TXBD_NUM (3)
  29. #define ENET_RXBUFF_SIZE (ENET_FRAME_MAX_FRAMELEN)
  30. #define ENET_TXBUFF_SIZE (ENET_FRAME_MAX_FRAMELEN)
  31. /* debug option */
  32. #define ETH_RX_DUMP
  33. #undef ETH_RX_DUMP
  34. #define ETH_TX_DUMP
  35. #undef ETH_TX_DUMP
  36. #define DBG_ENABLE
  37. #define DBG_SECTION_NAME "[ETH]"
  38. #define DBG_COLOR
  39. #define DBG_LEVEL DBG_INFO
  40. #include <rtdbg.h>
  41. #define RING_ID 0
  42. #define ENET_RING_NUM 1U
  43. #define MAX_ADDR_LEN 6
  44. //#ifdef SOC_IMXRT1170_SERIES
  45. typedef uint8_t rx_buffer_t[RT_ALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
  46. typedef uint8_t tx_buffer_t[RT_ALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
  47. #ifndef ENET_RXBUFF_NUM
  48. #define ENET_RXBUFF_NUM (ENET_RXBD_NUM * 2)
  49. #endif
  50. //#endif
  51. //#ifdef SOC_IMXRT1170_SERIES
/* Called by lwIP's pbuf_free() to hand a zero-copy RX buffer back to us. */
typedef void (*pbuf_free_custom_fn)(struct pbuf *p);
/** A custom pbuf: like a pbuf, but following a function pointer to free it.
 *  Local mirror of lwIP's struct pbuf_custom (LWIP_SUPPORT_CUSTOM_PBUF). */
struct pbuf_custom
{
    /** The actual pbuf */
    struct pbuf pbuf;
    /** This function is called when pbuf_free deallocates this pbuf(_custom) */
    pbuf_free_custom_fn custom_free_function;
};
/* Ties one RX DMA buffer to the custom pbuf that hands it to lwIP. */
typedef struct rx_pbuf_wrapper
{
    struct pbuf_custom p;      /*!< Pbuf wrapper. Has to be first. */
    void *buffer;              /*!< Original buffer wrapped by p. */
    volatile bool buffer_used; /*!< Wrapped buffer is used by ENET */
} rx_pbuf_wrapper_t;
  67. //#endif
/* Driver control block for one ENET controller instance. */
struct rt_imxrt_eth
{
    /* inherit from ethernet device; must be first so the RT-Thread eth
     * layer can treat a pointer to this struct as an eth_device. */
    struct eth_device parent;
    enet_handle_t enet_handle;             /* SDK driver handle */
    ENET_Type *enet_base;                  /* peripheral base address */
    enet_data_error_stats_t error_statistic; /* RX error counters */
    rt_uint8_t dev_addr[MAX_ADDR_LEN];     /* hw address */
    rt_bool_t tx_is_waiting;               /* TX thread blocked on tx_wait */
    struct rt_semaphore tx_wait;           /* released from TX-done interrupt */
    struct rt_semaphore buff_wait;         /* guards RxPbufs[].buffer_used */
    enet_mii_speed_t speed;                /* negotiated link speed */
    enet_mii_duplex_t duplex;              /* negotiated duplex mode */
//#ifdef SOC_IMXRT1170_SERIES
    enet_rx_bd_struct_t *RxBuffDescrip;    /* RX buffer descriptor ring */
    enet_tx_bd_struct_t *TxBuffDescrip;    /* TX buffer descriptor ring */
    rx_buffer_t *RxDataBuff;               /* RX DMA data buffers */
    tx_buffer_t *TxDataBuff;               /* TX DMA data buffers */
    rx_pbuf_wrapper_t RxPbufs[ENET_RXBUFF_NUM]; /* zero-copy RX wrappers */
//#endif
};
  89. //#if defined(__ICCARM__) /* IAR Workbench */
  90. //#pragma location = "enet_mem_section"
  91. //ALIGN(ENET_BUFF_ALIGNMENT)
  92. //static enet_tx_bd_struct_t g_txBuffDescrip[ENET_TXBD_NUM];
  93. //
  94. //ALIGN(ENET_BUFF_ALIGNMENT)
  95. //rt_uint8_t g_txDataBuff[ENET_TXBD_NUM][RT_ALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
  96. //
  97. //#pragma location = "enet_mem_section"
  98. //ALIGN(ENET_BUFF_ALIGNMENT)
  99. //static enet_rx_bd_struct_t g_rxBuffDescrip[ENET_RXBD_NUM];
  100. //
  101. //ALIGN(ENET_BUFF_ALIGNMENT)
  102. //rt_uint8_t g_rxDataBuff[ENET_RXBD_NUM][RT_ALIGN(ENET_RXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
  103. //
  104. //#else
/* Descriptor rings live in non-cacheable memory (DMA-visible without
 * maintenance); the data buffers are cacheable and flushed/invalidated
 * explicitly by the SDK driver, so plain alignment is enough for them. */
AT_NONCACHEABLE_SECTION_ALIGN(static enet_tx_bd_struct_t g_txBuffDescrip[ENET_TXBD_NUM], ENET_BUFF_ALIGNMENT);
rt_align(ENET_BUFF_ALIGNMENT)
rt_uint8_t g_txDataBuff[ENET_TXBD_NUM][RT_ALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
AT_NONCACHEABLE_SECTION_ALIGN(static enet_rx_bd_struct_t g_rxBuffDescrip[ENET_RXBD_NUM], ENET_BUFF_ALIGNMENT);
rt_align(ENET_BUFF_ALIGNMENT)
rt_uint8_t g_rxDataBuff[ENET_RXBD_NUM][RT_ALIGN(ENET_RXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
//#endif
/* Single driver instance for this board's ENET controller. */
static struct rt_imxrt_eth imxrt_eth_device;
  113. void _enet_rx_callback(struct rt_imxrt_eth *eth)
  114. {
  115. rt_err_t result;
  116. ENET_DisableInterrupts(eth->enet_base, kENET_RxFrameInterrupt);
  117. result = eth_device_ready(&(eth->parent));
  118. if (result != RT_EOK)
  119. rt_kprintf("RX err =%d\n", result);
  120. }
  121. void _enet_tx_callback(struct rt_imxrt_eth *eth)
  122. {
  123. dbg_log(DBG_LOG, "_enet_tx_callback\n");
  124. if (eth->tx_is_waiting == RT_TRUE)
  125. {
  126. eth->tx_is_waiting = RT_FALSE;
  127. rt_sem_release(&eth->tx_wait);
  128. }
  129. }
  130. static void _enet_callback(ENET_Type *base,
  131. enet_handle_t *handle,
  132. #if FSL_FEATURE_ENET_QUEUE > 1
  133. uint32_t ringId,
  134. #endif /* FSL_FEATURE_ENET_QUEUE */
  135. enet_event_t event,
  136. enet_frame_info_t *frameInfo,
  137. void *userData)
  138. {
  139. switch (event)
  140. {
  141. case kENET_RxEvent:
  142. _enet_rx_callback((struct rt_imxrt_eth *)userData);
  143. break;
  144. case kENET_TxEvent:
  145. _enet_tx_callback((struct rt_imxrt_eth *)userData);
  146. break;
  147. case kENET_ErrEvent:
  148. dbg_log(DBG_LOG, "kENET_ErrEvent\n");
  149. break;
  150. case kENET_WakeUpEvent:
  151. dbg_log(DBG_LOG, "kENET_WakeUpEvent\n");
  152. break;
  153. case kENET_TimeStampEvent:
  154. dbg_log(DBG_LOG, "kENET_TimeStampEvent\n");
  155. break;
  156. case kENET_TimeStampAvailEvent:
  157. dbg_log(DBG_LOG, "kENET_TimeStampAvailEvent \n");
  158. break;
  159. default:
  160. dbg_log(DBG_LOG, "unknow error\n");
  161. break;
  162. }
  163. }
/* Configure the clock tree feeding the ENET MAC and the reference clock
 * output to the external PHY. The exact mux/divider choices are
 * board/SoC-specific; do not reorder these calls. */
static void _enet_clk_init(void)
{
#ifdef SOC_IMXRT1170_SERIES
#ifdef PHY_USING_RTL8211F
    /* RGMII (1G-capable) path: 125 MHz reference for ENET_1G. */
    const clock_sys_pll1_config_t sysPll1Config = {
        .pllDiv2En = true,
    };
    CLOCK_InitSysPll1(&sysPll1Config);
    clock_root_config_t rootCfg = {.mux = 4, .div = 4}; /* Generate 125M root clock. */
    CLOCK_SetRootClock(kCLOCK_Root_Enet2, &rootCfg);
    IOMUXC_GPR->GPR5 |= IOMUXC_GPR_GPR5_ENET1G_RGMII_EN_MASK; /* bit1:iomuxc_gpr_enet_clk_dir
                                                                 bit0:GPR_ENET_TX_CLK_SEL(internal or OSC) */
#else
    /* RMII path: 50 MHz reference clock for ENET1. */
    const clock_sys_pll1_config_t sysPll1Config = {
        .pllDiv2En = true,
    };
    CLOCK_InitSysPll1(&sysPll1Config);
    clock_root_config_t rootCfg = {.mux = 4, .div = 10}; /* Generate 50M root clock. */
    CLOCK_SetRootClock(kCLOCK_Root_Enet1, &rootCfg);
    /* Select syspll2pfd3, 528*18/24 = 396M */
    CLOCK_InitPfd(kCLOCK_PllSys2, kCLOCK_Pfd3, 24);
    rootCfg.mux = 7;
    rootCfg.div = 2;
    CLOCK_SetRootClock(kCLOCK_Root_Bus, &rootCfg); /* Generate 198M bus clock. */
    /* NOTE(review): presumably enables the ENET TX clock direction/select
     * bits in GPR4 — confirm against the RT1170 reference manual. */
    IOMUXC_GPR->GPR4 |= 0x3;
#endif
#else
//    const clock_enet_pll_config_t config = {.enableClkOutput = true, .enableClkOutput25M = false, .loopDivider = 1};
//    CLOCK_InitEnetPll(&config);
//
//    IOMUXC_EnableMode(IOMUXC_GPR, kIOMUXC_GPR_ENET1TxClkOutputDir, true);
//    IOMUXC_GPR->GPR1 |= 1 << 23;
    /* Set 50MHz output clock required by PHY. */
    const clock_enet_pll_config_t config = {.enableClkOutput = true, .loopDivider = 1};
    CLOCK_InitEnetPll(&config);
    /* Output 50M clock to PHY. */
    IOMUXC_EnableMode(IOMUXC_GPR, kIOMUXC_GPR_ENET1TxClkOutputDir, true);
#endif
}
  203. //#ifdef SOC_IMXRT1170_SERIES
  204. static void *_enet_rx_alloc(ENET_Type *base, void *userData, uint8_t ringId)
  205. {
  206. void *buffer = NULL;
  207. int i;
  208. // dbg_log(DBG_LOG, "get buff_wait sem in %d\r\n", __LINE__);
  209. rt_sem_take(&imxrt_eth_device.buff_wait, RT_WAITING_FOREVER);
  210. for (i = 0; i < ENET_RXBUFF_NUM; i++)
  211. {
  212. if (!imxrt_eth_device.RxPbufs[i].buffer_used)
  213. {
  214. imxrt_eth_device.RxPbufs[i].buffer_used = true;
  215. buffer = &imxrt_eth_device.RxDataBuff[i];
  216. break;
  217. }
  218. }
  219. rt_sem_release(&imxrt_eth_device.buff_wait);
  220. // dbg_log(DBG_LOG, "release buff_wait sem in %d\r\n", __LINE__);
  221. return buffer;
  222. }
  223. static void _enet_rx_free(ENET_Type *base, void *buffer, void *userData, uint8_t ringId)
  224. {
  225. int idx = ((rx_buffer_t *)buffer) - imxrt_eth_device.RxDataBuff;
  226. if (!((idx >= 0) && (idx < ENET_RXBUFF_NUM)))
  227. {
  228. LOG_E("Freed buffer out of range\r\n");
  229. }
  230. // dbg_log(DBG_LOG, "get buff_wait sem in %d\r\n", __LINE__);
  231. rt_sem_take(&imxrt_eth_device.buff_wait, RT_WAITING_FOREVER);
  232. if (!(imxrt_eth_device.RxPbufs[idx].buffer_used))
  233. {
  234. LOG_E("_enet_rx_free: freeing unallocated buffer\r\n");
  235. }
  236. imxrt_eth_device.RxPbufs[idx].buffer_used = false;
  237. rt_sem_release(&imxrt_eth_device.buff_wait);
  238. // dbg_log(DBG_LOG, "release buff_wait sem in %d\r\n", __LINE__);
  239. }
  240. /**
  241. * Reclaims RX buffer held by the p after p is no longer used
  242. * by the application / lwIP.
  243. */
  244. static void _enet_rx_release(struct pbuf *p)
  245. {
  246. rx_pbuf_wrapper_t *wrapper = (rx_pbuf_wrapper_t *)p;
  247. _enet_rx_free(imxrt_eth_device.enet_base, wrapper->buffer, &imxrt_eth_device, 0);
  248. }
  249. //#endif
/* Build the ENET configuration (buffer rings, MII mode, interrupts,
 * zero-copy RX hooks) and bring the controller up. Called from
 * rt_imxrt_eth_init after link parameters (speed/duplex) are known. */
static void _enet_config(void)
{
    enet_config_t config;
    uint32_t sysClock;
    /* prepare the buffer configuration. */
//#ifndef SOC_IMXRT1170_SERIES
//    enet_buffer_config_t buffConfig[] =
//    {
//        ENET_RXBD_NUM,
//        ENET_TXBD_NUM,
//        SDK_SIZEALIGN(ENET_RXBUFF_SIZE, ENET_BUFF_ALIGNMENT),
//        SDK_SIZEALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT),
//        &g_rxBuffDescrip[0],
//        &g_txBuffDescrip[0],
//        &g_rxDataBuff[0][0],
//        &g_txDataBuff[0][0],
//    };
//    /* Get default configuration. */
//    /*
//     * config.miiMode = kENET_RmiiMode;
//     * config.miiSpeed = kENET_MiiSpeed100M;
//     * config.miiDuplex = kENET_MiiFullDuplex;
//     * config.rxMaxFrameLen = ENET_FRAME_MAX_FRAMELEN;
//     */
//
//    ENET_GetDefaultConfig(&config);
//    config.ringNum = ENET_RING_NUM;
//    config.miiSpeed = imxrt_eth_device.speed;
//    config.miiDuplex = imxrt_eth_device.duplex;
//
////#ifdef PHY_USING_RTL8211F
////    config.miiMode = kENET_RgmiiMode;
////    EnableIRQ(ENET_1G_MAC0_Tx_Rx_1_IRQn);
////    EnableIRQ(ENET_1G_MAC0_Tx_Rx_2_IRQn);
////#else
////    config.miiMode = kENET_RmiiMode;
////#endif
//
//
//    config.interrupt |= kENET_TxFrameInterrupt | kENET_RxFrameInterrupt;
//    config.callback = _enet_callback;
//
////    ENET_GetDefaultConfig(&config);
//    config.ringNum = ENET_RING_NUM;
////    config.interrupt = kENET_TxFrameInterrupt | kENET_RxFrameInterrupt;
//    config.miiSpeed = imxrt_eth_device.speed;
//    config.miiDuplex = imxrt_eth_device.duplex;
//
//    /* Set SMI to get PHY link status. */
//    sysClock = CLOCK_GetFreq(kCLOCK_IpgClk);
//
//    dbg_log(DBG_LOG, "deinit\n");
//    ENET_Deinit(imxrt_eth_device.enet_base);
//    dbg_log(DBG_LOG, "init\n");
//    ENET_Init(imxrt_eth_device.enet_base, &imxrt_eth_device.enet_handle, &config, &buffConfig[0], &imxrt_eth_device.dev_addr[0], sysClock);
////    dbg_log(DBG_LOG, "set call back\n");
////    ENET_SetCallback(&imxrt_eth_device.enet_handle, _enet_callback, &imxrt_eth_device);
//    dbg_log(DBG_LOG, "active read\n");
//    ENET_ActiveRead(imxrt_eth_device.enet_base);
//#else
    int i;
    enet_buffer_config_t buffConfig[ENET_RING_NUM];
    /* Point the device at the statically allocated rings/buffers. */
    imxrt_eth_device.RxBuffDescrip = &g_rxBuffDescrip[0];
    imxrt_eth_device.TxBuffDescrip = &g_txBuffDescrip[0];
    imxrt_eth_device.RxDataBuff = &g_rxDataBuff[0];
    imxrt_eth_device.TxDataBuff = &g_txDataBuff[0];
    buffConfig[0].rxBdNumber = ENET_RXBD_NUM;            /* Receive buffer descriptor number. */
    buffConfig[0].txBdNumber = ENET_TXBD_NUM;            /* Transmit buffer descriptor number. */
    buffConfig[0].rxBuffSizeAlign = sizeof(rx_buffer_t); /* Aligned receive data buffer size. */
    buffConfig[0].txBuffSizeAlign = sizeof(tx_buffer_t); /* Aligned transmit data buffer size. */
    buffConfig[0].rxBdStartAddrAlign =
        &(imxrt_eth_device.RxBuffDescrip[0]); /* Aligned receive buffer descriptor start address. */
    buffConfig[0].txBdStartAddrAlign =
        &(imxrt_eth_device.TxBuffDescrip[0]); /* Aligned transmit buffer descriptor start address. */
    buffConfig[0].rxBufferAlign =
        NULL; /* Receive data buffer start address. NULL when buffers are allocated by callback for RX zero-copy. */
    buffConfig[0].txBufferAlign = &(imxrt_eth_device.TxDataBuff[0][0]); /* Transmit data buffer start address. */
    buffConfig[0].txFrameInfo = NULL;      /* Transmit frame information start address. Set only if using zero-copy transmit. */
    buffConfig[0].rxMaintainEnable = true; /* Receive buffer cache maintain. */
    buffConfig[0].txMaintainEnable = true; /* Transmit buffer cache maintain. */
    /* Get default configuration. */
    /*
     * config.miiMode = kENET_RmiiMode;
     * config.miiSpeed = kENET_MiiSpeed100M;
     * config.miiDuplex = kENET_MiiFullDuplex;
     * config.rxMaxFrameLen = ENET_FRAME_MAX_FRAMELEN;
     */
    ENET_GetDefaultConfig(&config);
    config.ringNum = ENET_RING_NUM;
    config.miiSpeed = imxrt_eth_device.speed;
    config.miiDuplex = imxrt_eth_device.duplex;
#ifdef PHY_USING_RTL8211F
    config.miiMode = kENET_RgmiiMode;
    EnableIRQ(ENET_1G_MAC0_Tx_Rx_1_IRQn);
    EnableIRQ(ENET_1G_MAC0_Tx_Rx_2_IRQn);
#else
    config.miiMode = kENET_RmiiMode;
#endif
    /* Zero-copy RX: the SDK driver pulls/returns buffers through these hooks. */
    config.rxBuffAlloc = _enet_rx_alloc;
    config.rxBuffFree = _enet_rx_free;
    config.userData = &imxrt_eth_device;
#ifdef SOC_IMXRT1170_SERIES
    /* Set SMI to get PHY link status. */
    sysClock = CLOCK_GetRootClockFreq(kCLOCK_Root_Bus);
#else
    sysClock = CLOCK_GetFreq(kCLOCK_IpgClk);
#endif
    config.interrupt |= kENET_TxFrameInterrupt | kENET_RxFrameInterrupt | kENET_TxBufferInterrupt | kENET_LateCollisionInterrupt;
    config.callback = _enet_callback;
    /* Initialize all RX pbuf wrappers as free and wire up their release hook. */
    for (i = 0; i < ENET_RXBUFF_NUM; i++)
    {
        imxrt_eth_device.RxPbufs[i].p.custom_free_function = _enet_rx_release;
        imxrt_eth_device.RxPbufs[i].buffer = &(imxrt_eth_device.RxDataBuff[i][0]);
        imxrt_eth_device.RxPbufs[i].buffer_used = false;
    }
//    dbg_log(DBG_LOG, "deinit\n");
//    ENET_Deinit(imxrt_eth_device.enet_base);
    dbg_log(DBG_LOG, "init\n");
    ENET_Init(imxrt_eth_device.enet_base, &imxrt_eth_device.enet_handle, &config, &buffConfig[0], &imxrt_eth_device.dev_addr[0], sysClock);
//    dbg_log(DBG_LOG, "set call back\n");
//    ENET_SetCallback(&imxrt_eth_device.enet_handle, _enet_callback, &imxrt_eth_device);
    dbg_log(DBG_LOG, "active read\n");
    ENET_ActiveRead(imxrt_eth_device.enet_base);
//#endif
}
  375. #if defined(ETH_RX_DUMP) || defined(ETH_TX_DUMP)
  376. static void packet_dump(const char *msg, const struct pbuf *p)
  377. {
  378. const struct pbuf *q;
  379. rt_uint32_t i, j;
  380. rt_uint8_t *ptr;
  381. rt_kprintf("%s %d byte\n", msg, p->tot_len);
  382. i = 0;
  383. for (q = p; q != RT_NULL; q = q->next)
  384. {
  385. ptr = q->payload;
  386. for (j = 0; j < q->len; j++)
  387. {
  388. if ((i % 8) == 0)
  389. {
  390. rt_kprintf(" ");
  391. }
  392. if ((i % 16) == 0)
  393. {
  394. rt_kprintf("\r\n");
  395. }
  396. rt_kprintf("%02x ", *ptr);
  397. i++;
  398. ptr++;
  399. }
  400. }
  401. rt_kprintf("\n\n");
  402. }
  403. #else
  404. #define packet_dump(...)
  405. #endif /* dump */
/* initialize the interface */
/* rt_device init hook: configures and starts the ENET controller.
 * Always returns RT_EOK; _enet_config() has no failure path. */
static rt_err_t rt_imxrt_eth_init(rt_device_t dev)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_init...\n");
    _enet_config();
    return RT_EOK;
}
/* rt_device open hook: nothing to do, the device is always ready. */
static rt_err_t rt_imxrt_eth_open(rt_device_t dev, rt_uint16_t oflag)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_open...\n");
    return RT_EOK;
}
/* rt_device close hook: nothing to release. */
static rt_err_t rt_imxrt_eth_close(rt_device_t dev)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_close...\n");
    return RT_EOK;
}
  423. static rt_ssize_t rt_imxrt_eth_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
  424. {
  425. dbg_log(DBG_LOG, "rt_imxrt_eth_read...\n");
  426. rt_set_errno(-RT_ENOSYS);
  427. return 0;
  428. }
  429. static rt_ssize_t rt_imxrt_eth_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
  430. {
  431. dbg_log(DBG_LOG, "rt_imxrt_eth_write...\n");
  432. rt_set_errno(-RT_ENOSYS);
  433. return 0;
  434. }
  435. static rt_err_t rt_imxrt_eth_control(rt_device_t dev, int cmd, void *args)
  436. {
  437. dbg_log(DBG_LOG, "rt_imxrt_eth_control...\n");
  438. switch (cmd)
  439. {
  440. case NIOCTL_GADDR:
  441. /* get mac address */
  442. if (args)
  443. rt_memcpy(args, imxrt_eth_device.dev_addr, 6);
  444. else
  445. return -RT_ERROR;
  446. break;
  447. default:
  448. break;
  449. }
  450. return RT_EOK;
  451. }
  452. static bool _ENET_TxDirtyRingAvailable(enet_tx_dirty_ring_t *txDirtyRing)
  453. {
  454. return !txDirtyRing->isFull;
  455. }
  456. static uint16_t _ENET_IncreaseIndex(uint16_t index, uint16_t max)
  457. {
  458. assert(index < max);
  459. /* Increase the index. */
  460. index++;
  461. if (index >= max)
  462. {
  463. index = 0;
  464. }
  465. return index;
  466. }
/* Kick the hardware to (re)scan the TX descriptor ring `ringId` by writing
 * the ring's TDAR register. Includes the ERRATA 007885 software workaround. */
static void _ENET_ActiveSendRing(ENET_Type *base, uint8_t ringId)
{
    assert(ringId < (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base));
    volatile uint32_t *txDesActive = NULL;
    /* Ensure previous data update is completed with Data Synchronization Barrier before activing Tx BD. */
    __DSB();
    /* Select the TDAR register for the requested ring. */
    switch (ringId)
    {
    case 0:
        txDesActive = &(base->TDAR);
        break;
#if FSL_FEATURE_ENET_QUEUE > 1
    case 1:
        txDesActive = &(base->TDAR1);
        break;
    case 2:
        txDesActive = &(base->TDAR2);
        break;
#endif /* FSL_FEATURE_ENET_QUEUE > 1 */
    default:
        txDesActive = &(base->TDAR);
        break;
    }
#if defined(FSL_FEATURE_ENET_HAS_ERRATA_007885) && FSL_FEATURE_ENET_HAS_ERRATA_007885
    /* There is a TDAR race condition for mutliQ when the software sets TDAR
     * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles).
     * This will cause the udma_tx and udma_tx_arbiter state machines to hang.
     * Software workaround: introduces a delay by reading the relevant ENET_TDARn_TDAR 4 times
     */
    for (uint8_t i = 0; i < 4U; i++)
    {
        if (*txDesActive == 0U)
        {
            break;
        }
    }
#endif
    /* Write to active tx descriptor */
    *txDesActive = 0;
}
  507. static status_t _ENET_SendFrame(ENET_Type *base,
  508. enet_handle_t *handle,
  509. const uint8_t *data,
  510. uint32_t length,
  511. uint8_t ringId,
  512. bool tsFlag,
  513. void *context)
  514. {
  515. assert(handle != NULL);
  516. assert(data != NULL);
  517. assert(FSL_FEATURE_ENET_INSTANCE_QUEUEn(base) != -1);
  518. assert(ringId < (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base));
  519. volatile enet_tx_bd_struct_t *curBuffDescrip;
  520. enet_tx_bd_ring_t *txBdRing = &handle->txBdRing[ringId];
  521. enet_tx_dirty_ring_t *txDirtyRing = &handle->txDirtyRing[ringId];
  522. enet_frame_info_t *txDirty = NULL;
  523. uint32_t len = 0;
  524. uint32_t sizeleft = 0;
  525. uint32_t address;
  526. status_t result = kStatus_Success;
  527. uint32_t src;
  528. uint32_t configVal;
  529. bool isReturn = false;
  530. uint32_t primask;
  531. /* Check the frame length. */
  532. if (length > ENET_FRAME_TX_LEN_LIMITATION(base))
  533. {
  534. result = kStatus_ENET_TxFrameOverLen;
  535. }
  536. else
  537. {
  538. /* Check if the transmit buffer is ready. */
  539. curBuffDescrip = txBdRing->txBdBase + txBdRing->txGenIdx;
  540. if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK))
  541. {
  542. result = kStatus_ENET_TxFrameBusy;
  543. }
  544. /* Check txDirtyRing if need frameinfo in tx interrupt callback. */
  545. else if ((handle->txReclaimEnable[ringId]) && !_ENET_TxDirtyRingAvailable(txDirtyRing))
  546. {
  547. result = kStatus_ENET_TxFrameBusy;
  548. }
  549. else
  550. {
  551. /* One transmit buffer is enough for one frame. */
  552. if (handle->txBuffSizeAlign[ringId] >= length)
  553. {
  554. /* Copy data to the buffer for uDMA transfer. */
  555. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  556. address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
  557. #else
  558. address = (uint32_t)curBuffDescrip->buffer;
  559. #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
  560. pbuf_copy_partial((const struct pbuf *)data, (void *)address, length, 0);
  561. #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
  562. if (handle->txMaintainEnable[ringId])
  563. {
  564. DCACHE_CleanByRange(address, length);
  565. }
  566. #endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
  567. /* Set data length. */
  568. curBuffDescrip->length = (uint16_t)length;
  569. #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
  570. /* For enable the timestamp. */
  571. if (tsFlag)
  572. {
  573. curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
  574. }
  575. else
  576. {
  577. curBuffDescrip->controlExtend1 &= (uint16_t)(~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK);
  578. }
  579. #endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
  580. curBuffDescrip->control |= (ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK);
  581. /* Increase the buffer descriptor address. */
  582. txBdRing->txGenIdx = _ENET_IncreaseIndex(txBdRing->txGenIdx, txBdRing->txRingLen);
  583. /* Add context to frame info ring */
  584. if (handle->txReclaimEnable[ringId])
  585. {
  586. txDirty = txDirtyRing->txDirtyBase + txDirtyRing->txGenIdx;
  587. txDirty->context = context;
  588. txDirtyRing->txGenIdx = _ENET_IncreaseIndex(txDirtyRing->txGenIdx, txDirtyRing->txRingLen);
  589. if (txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx)
  590. {
  591. txDirtyRing->isFull = true;
  592. }
  593. primask = DisableGlobalIRQ();
  594. txBdRing->txDescUsed++;
  595. EnableGlobalIRQ(primask);
  596. }
  597. /* Active the transmit buffer descriptor. */
  598. _ENET_ActiveSendRing(base, ringId);
  599. }
  600. else
  601. {
  602. /* One frame requires more than one transmit buffers. */
  603. do
  604. {
  605. #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
  606. /* For enable the timestamp. */
  607. if (tsFlag)
  608. {
  609. curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
  610. }
  611. else
  612. {
  613. curBuffDescrip->controlExtend1 &= (uint16_t)(~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK);
  614. }
  615. #endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
  616. /* Update the size left to be transmit. */
  617. sizeleft = length - len;
  618. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  619. address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
  620. #else
  621. address = (uint32_t)curBuffDescrip->buffer;
  622. #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
  623. src = (uint32_t)data + len;
  624. /* Increase the current software index of BD */
  625. txBdRing->txGenIdx = _ENET_IncreaseIndex(txBdRing->txGenIdx, txBdRing->txRingLen);
  626. if (sizeleft > handle->txBuffSizeAlign[ringId])
  627. {
  628. /* Data copy. */
  629. (void)memcpy((void *)(uint32_t *)address, (void *)(uint32_t *)src,
  630. handle->txBuffSizeAlign[ringId]);
  631. #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
  632. if (handle->txMaintainEnable[ringId])
  633. {
  634. /* Add the cache clean maintain. */
  635. DCACHE_CleanByRange(address, handle->txBuffSizeAlign[ringId]);
  636. }
  637. #endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
  638. /* Data length update. */
  639. curBuffDescrip->length = handle->txBuffSizeAlign[ringId];
  640. len += handle->txBuffSizeAlign[ringId];
  641. /* Sets the control flag. */
  642. configVal = (uint32_t)curBuffDescrip->control;
  643. configVal &= ~ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
  644. configVal |= ENET_BUFFDESCRIPTOR_TX_READY_MASK;
  645. curBuffDescrip->control = (uint16_t)configVal;
  646. if (handle->txReclaimEnable[ringId])
  647. {
  648. primask = DisableGlobalIRQ();
  649. txBdRing->txDescUsed++;
  650. EnableGlobalIRQ(primask);
  651. }
  652. /* Active the transmit buffer descriptor*/
  653. _ENET_ActiveSendRing(base, ringId);
  654. }
  655. else
  656. {
  657. (void)memcpy((void *)(uint32_t *)address, (void *)(uint32_t *)src, sizeleft);
  658. #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
  659. if (handle->txMaintainEnable[ringId])
  660. {
  661. /* Add the cache clean maintain. */
  662. DCACHE_CleanByRange(address, sizeleft);
  663. }
  664. #endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
  665. curBuffDescrip->length = (uint16_t)sizeleft;
  666. /* Set Last buffer wrap flag. */
  667. curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
  668. if (handle->txReclaimEnable[ringId])
  669. {
  670. /* Add context to frame info ring */
  671. txDirty = txDirtyRing->txDirtyBase + txDirtyRing->txGenIdx;
  672. txDirty->context = context;
  673. txDirtyRing->txGenIdx = _ENET_IncreaseIndex(txDirtyRing->txGenIdx, txDirtyRing->txRingLen);
  674. if (txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx)
  675. {
  676. txDirtyRing->isFull = true;
  677. }
  678. primask = DisableGlobalIRQ();
  679. txBdRing->txDescUsed++;
  680. EnableGlobalIRQ(primask);
  681. }
  682. /* Active the transmit buffer descriptor. */
  683. _ENET_ActiveSendRing(base, ringId);
  684. isReturn = true;
  685. break;
  686. }
  687. /* Update the buffer descriptor address. */
  688. curBuffDescrip = txBdRing->txBdBase + txBdRing->txGenIdx;
  689. } while (0U == (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK));
  690. if (isReturn == false)
  691. {
  692. result = kStatus_ENET_TxFrameBusy;
  693. }
  694. }
  695. }
  696. }
  697. return result;
  698. }
  699. /* ethernet device interface */
  700. /* transmit packet. */
  701. rt_err_t rt_imxrt_eth_tx(rt_device_t dev, struct pbuf *p)
  702. {
  703. rt_err_t result = RT_EOK;
  704. enet_handle_t *enet_handle = &imxrt_eth_device.enet_handle;
  705. RT_ASSERT(p != NULL);
  706. RT_ASSERT(enet_handle != RT_NULL);
  707. dbg_log(DBG_LOG, "rt_imxrt_eth_tx: %d\n", p->len);
  708. #ifdef ETH_TX_DUMP
  709. packet_dump("send", p);
  710. #endif
  711. do
  712. {
  713. result = _ENET_SendFrame(imxrt_eth_device.enet_base, enet_handle, (const uint8_t *)p, p->tot_len, RING_ID, false, NULL);
  714. if (result == kStatus_ENET_TxFrameBusy)
  715. {
  716. imxrt_eth_device.tx_is_waiting = RT_TRUE;
  717. rt_sem_take(&imxrt_eth_device.tx_wait, RT_WAITING_FOREVER);
  718. }
  719. } while (result == kStatus_ENET_TxFrameBusy);
  720. return RT_EOK;
  721. }
  722. /* reception packet. */
/* Reception: pull one completed frame from the ENET driver and return it to
 * the lwIP glue as a freshly allocated pbuf.
 * @param dev  the rt_device (unused; the driver state is the file-scope singleton).
 * @return the filled pbuf on success; NULL when no frame is pending, on
 *         allocation failure, or on a read/frame error.
 * NOTE(review): ENET_ReadFrame copies `length` bytes into p->payload, but
 * pbuf_alloc(PBUF_POOL) may return a *chained* pbuf whose first segment is
 * shorter than `length` -- confirm frames cannot exceed one pool buffer. */
struct pbuf *rt_imxrt_eth_rx(rt_device_t dev)
{
uint32_t length = 0;
status_t status;
struct pbuf *p = RT_NULL;
enet_handle_t *enet_handle = &imxrt_eth_device.enet_handle;
ENET_Type *enet_base = imxrt_eth_device.enet_base;
enet_data_error_stats_t *error_statistic = &imxrt_eth_device.error_statistic;
/* Get the Frame size */
status = ENET_GetRxFrameSize(enet_handle, &length, RING_ID);
/* Call ENET_ReadFrame when there is a received frame. */
if (length != 0)
{
/* Received valid frame. Deliver the rx buffer with the size equal to length. */
p = pbuf_alloc(PBUF_RAW, length, PBUF_POOL);
if (p != NULL)
{
status = ENET_ReadFrame(enet_base, enet_handle, p->payload, length, RING_ID, NULL);
if (status == kStatus_Success)
{
#ifdef ETH_RX_DUMP
packet_dump("recv", p);
#endif
/* Ownership of p transfers to the caller (lwIP frees it). */
return p;
}
else
{
/* Read failed: release the pbuf; the NULL return below signals "no frame". */
dbg_log(DBG_LOG, " A frame read failed\n");
pbuf_free(p);
}
}
else
{
dbg_log(DBG_LOG, " pbuf_alloc faild\n");
}
}
else if (status == kStatus_ENET_RxFrameError)
{
dbg_log(DBG_WARNING, "ENET_GetRxFrameSize: kStatus_ENET_RxFrameError\n");
/* Update the received buffer when error happened. */
/* Get the error information of the received g_frame. */
ENET_GetRxErrBeforeReadFrame(enet_handle, error_statistic, RING_ID);
/* update the receive buffer. */
/* Passing NULL/0 discards the errored frame and recycles its descriptor. */
ENET_ReadFrame(enet_base, enet_handle, NULL, 0, RING_ID, NULL);
}
/* Unmask the RX-frame interrupt again (presumably masked by the ISR to defer
 * reception to thread context -- TODO(review): confirm against the handler). */
ENET_EnableInterrupts(enet_base, kENET_RxFrameInterrupt);
return NULL;
}
  771. #ifdef BSP_USING_PHY
  772. static struct rt_phy_device *phy_dev = RT_NULL;
  773. static void phy_monitor_thread_entry(void *parameter)
  774. {
  775. rt_uint32_t speed;
  776. rt_uint32_t duplex;
  777. rt_bool_t link = RT_FALSE;
  778. #ifdef SOC_IMXRT1170_SERIES
  779. #ifdef PHY_USING_RTL8211F
  780. phy_dev = (struct rt_phy_device *)rt_device_find("rtl8211f");
  781. if ((RT_NULL == phy_dev) || (RT_NULL == phy_dev->ops))
  782. {
  783. // TODO print warning information
  784. LOG_E("Can not find phy device called \"rtl8211f\"");
  785. return;
  786. }
  787. #else
  788. phy_dev = (struct rt_phy_device *)rt_device_find("ksz8081");
  789. if ((RT_NULL == phy_dev) || (RT_NULL == phy_dev->ops))
  790. {
  791. // TODO print warning information
  792. LOG_E("Can not find phy device called \"ksz8081\"");
  793. return;
  794. }
  795. #endif
  796. #else
  797. phy_dev = (struct rt_phy_device *)rt_device_find("ksz8081");
  798. if ((RT_NULL == phy_dev) || (RT_NULL == phy_dev->ops))
  799. {
  800. // TODO print warning information
  801. LOG_E("Can not find phy device called \"rtt-phy\"");
  802. return;
  803. }
  804. #endif
  805. if (RT_NULL == phy_dev->ops->init)
  806. {
  807. LOG_E("phy driver error!");
  808. return;
  809. }
  810. #ifdef SOC_IMXRT1170_SERIES
  811. #ifdef PHY_USING_RTL8211F
  812. rt_phy_status status = phy_dev->ops->init(imxrt_eth_device.enet_base, PHY_RTL8211F_ADDRESS, CLOCK_GetRootClockFreq(kCLOCK_Root_Bus));
  813. #else
  814. rt_phy_status status = phy_dev->ops->init(imxrt_eth_device.enet_base, PHY_KSZ8081_ADDRESS, CLOCK_GetRootClockFreq(kCLOCK_Root_Bus));
  815. #endif
  816. #else
  817. rt_phy_status status = phy_dev->ops->init(imxrt_eth_device.enet_base, PHY_KSZ8081_ADDRESS, CLOCK_GetFreq(kCLOCK_IpgClk));
  818. #endif
  819. if (PHY_STATUS_OK != status)
  820. {
  821. LOG_E("Phy device initialize unsuccessful!\n");
  822. return;
  823. }
  824. LOG_I("Phy device initialize successfully!\n");
  825. while (1)
  826. {
  827. rt_bool_t new_link = RT_FALSE;
  828. rt_phy_status status = phy_dev->ops->get_link_status(&new_link);
  829. if ((PHY_STATUS_OK == status) && (link != new_link))
  830. {
  831. link = new_link;
  832. if (link) // link up
  833. {
  834. phy_dev->ops->get_link_speed_duplex(&speed, &duplex);
  835. if (PHY_SPEED_10M == speed)
  836. {
  837. dbg_log(DBG_LOG, "10M\n");
  838. }
  839. else if (PHY_SPEED_100M == speed)
  840. {
  841. dbg_log(DBG_LOG, "100M\n");
  842. }
  843. else
  844. {
  845. dbg_log(DBG_LOG, "1000M\n");
  846. }
  847. if (PHY_HALF_DUPLEX == duplex)
  848. {
  849. dbg_log(DBG_LOG, "half dumplex\n");
  850. }
  851. else
  852. {
  853. dbg_log(DBG_LOG, "full dumplex\n");
  854. }
  855. if ((imxrt_eth_device.speed != (enet_mii_speed_t)speed) || (imxrt_eth_device.duplex != (enet_mii_duplex_t)duplex))
  856. {
  857. imxrt_eth_device.speed = (enet_mii_speed_t)speed;
  858. imxrt_eth_device.duplex = (enet_mii_duplex_t)duplex;
  859. dbg_log(DBG_LOG, "link up, and update eth mode.\n");
  860. rt_imxrt_eth_init((rt_device_t)&imxrt_eth_device);
  861. }
  862. else
  863. {
  864. dbg_log(DBG_LOG, "link up, eth not need re-config.\n");
  865. }
  866. dbg_log(DBG_LOG, "link up.\n");
  867. eth_device_linkchange(&imxrt_eth_device.parent, RT_TRUE);
  868. }
  869. else
  870. {
  871. dbg_log(DBG_LOG, "link down.\n");
  872. eth_device_linkchange(&imxrt_eth_device.parent, RT_FALSE);
  873. }
  874. }
  875. rt_thread_delay(RT_TICK_PER_SECOND * 2);
  876. // rt_thread_mdelay(300);
  877. }
  878. }
  879. #endif
  880. static int rt_hw_imxrt_eth_init(void)
  881. {
  882. rt_err_t state;
  883. _enet_clk_init();
  884. #ifdef PHY_USING_RTL8211F
  885. /* NXP (Freescale) MAC OUI */
  886. imxrt_eth_device.dev_addr[0] = 0x54;
  887. imxrt_eth_device.dev_addr[1] = 0x27;
  888. imxrt_eth_device.dev_addr[2] = 0x8d;
  889. /* generate MAC addr from 96bit unique ID (only for test). */
  890. imxrt_eth_device.dev_addr[3] = 0x11;
  891. imxrt_eth_device.dev_addr[4] = 0x22;
  892. imxrt_eth_device.dev_addr[5] = 0x33;
  893. imxrt_eth_device.speed = kENET_MiiSpeed100M;//Ҫ֧��ǧ�ף�ֱ�ӽ���ֵ��ΪkENET_MiiSpeed1000M
  894. imxrt_eth_device.duplex = kENET_MiiFullDuplex;
  895. imxrt_eth_device.enet_base = ENET_1G;
  896. #else
  897. /* NXP (Freescale) MAC OUI */
  898. imxrt_eth_device.dev_addr[0] = 0x54;
  899. imxrt_eth_device.dev_addr[1] = 0x27;
  900. imxrt_eth_device.dev_addr[2] = 0x8d;
  901. /* generate MAC addr from 96bit unique ID (only for test). */
  902. imxrt_eth_device.dev_addr[3] = 0x00;
  903. imxrt_eth_device.dev_addr[4] = 0x00;
  904. imxrt_eth_device.dev_addr[5] = 0x00;
  905. imxrt_eth_device.speed = kENET_MiiSpeed100M;
  906. imxrt_eth_device.duplex = kENET_MiiFullDuplex;
  907. imxrt_eth_device.enet_base = ENET;
  908. #endif
  909. imxrt_eth_device.parent.parent.init = rt_imxrt_eth_init;
  910. imxrt_eth_device.parent.parent.open = rt_imxrt_eth_open;
  911. imxrt_eth_device.parent.parent.close = rt_imxrt_eth_close;
  912. imxrt_eth_device.parent.parent.read = rt_imxrt_eth_read;
  913. imxrt_eth_device.parent.parent.write = rt_imxrt_eth_write;
  914. imxrt_eth_device.parent.parent.control = rt_imxrt_eth_control;
  915. imxrt_eth_device.parent.parent.user_data = RT_NULL;
  916. imxrt_eth_device.parent.eth_rx = rt_imxrt_eth_rx;
  917. imxrt_eth_device.parent.eth_tx = rt_imxrt_eth_tx;
  918. dbg_log(DBG_LOG, "sem init: tx_wait\r\n");
  919. /* init tx semaphore */
  920. rt_sem_init(&imxrt_eth_device.tx_wait, "tx_wait", 0, RT_IPC_FLAG_FIFO);
  921. dbg_log(DBG_LOG, "sem init: buff_wait\r\n");
  922. /* init tx semaphore */
  923. rt_sem_init(&imxrt_eth_device.buff_wait, "buff_wait", 1, RT_IPC_FLAG_FIFO);
  924. /* register eth device */
  925. dbg_log(DBG_LOG, "eth_device_init start\r\n");
  926. state = eth_device_init(&(imxrt_eth_device.parent), "e0");
  927. if (RT_EOK == state)
  928. {
  929. dbg_log(DBG_LOG, "eth_device_init success\r\n");
  930. }
  931. else
  932. {
  933. dbg_log(DBG_LOG, "eth_device_init faild: %d\r\n", state);
  934. }
  935. eth_device_linkchange(&imxrt_eth_device.parent, RT_FALSE);
  936. /* start phy monitor */
  937. {
  938. #ifdef BSP_USING_PHY
  939. rt_thread_t tid;
  940. tid = rt_thread_create("phy",
  941. phy_monitor_thread_entry,
  942. RT_NULL,
  943. 4096,
  944. /*RT_THREAD_PRIORITY_MAX - 2,*/
  945. 15,
  946. 2);
  947. if (tid != RT_NULL)
  948. rt_thread_startup(tid);
  949. #endif
  950. }
  951. return state;
  952. }
  953. INIT_DEVICE_EXPORT(rt_hw_imxrt_eth_init);
  954. #endif
  955. #if defined(RT_USING_FINSH) && defined(RT_USING_PHY)
  956. #include <finsh.h>
  957. void phy_read(rt_uint32_t phy_reg)
  958. {
  959. rt_uint32_t data;
  960. rt_phy_status status = phy_dev->ops->read(phy_reg, &data);
  961. if (PHY_STATUS_OK == status)
  962. {
  963. rt_kprintf("PHY_Read: %02X --> %08X", phy_reg, data);
  964. }
  965. else
  966. {
  967. rt_kprintf("PHY_Read: %02X --> faild", phy_reg);
  968. }
  969. }
  970. void phy_write(rt_uint32_t phy_reg, rt_uint32_t data)
  971. {
  972. rt_phy_status status = phy_dev->ops->write(phy_reg, data);
  973. if (PHY_STATUS_OK == status)
  974. {
  975. rt_kprintf("PHY_Write: %02X --> %08X\n", phy_reg, data);
  976. }
  977. else
  978. {
  979. rt_kprintf("PHY_Write: %02X --> faild\n", phy_reg);
  980. }
  981. }
  982. void phy_dump(void)
  983. {
  984. rt_uint32_t data;
  985. rt_phy_status status;
  986. int i;
  987. for (i = 0; i < 32; i++)
  988. {
  989. status = phy_dev->ops->read(i, &data);
  990. if (PHY_STATUS_OK != status)
  991. {
  992. rt_kprintf("phy_dump: %02X --> faild", i);
  993. break;
  994. }
  995. if (i % 8 == 7)
  996. {
  997. rt_kprintf("%02X --> %08X ", i, data);
  998. }
  999. else
  1000. {
  1001. rt_kprintf("%02X --> %08X\n", i, data);
  1002. }
  1003. }
  1004. }
  1005. #endif
  1006. #if defined(RT_USING_FINSH) && defined(RT_USING_LWIP)
  1007. void enet_reg_dump(void)
  1008. {
  1009. ENET_Type *enet_base = imxrt_eth_device.enet_base;
  1010. #define DUMP_REG(__REG) \
  1011. rt_kprintf("%s(%08X): %08X\n", #__REG, (uint32_t)&enet_base->__REG, enet_base->__REG)
  1012. DUMP_REG(EIR);
  1013. DUMP_REG(EIMR);
  1014. DUMP_REG(RDAR);
  1015. DUMP_REG(TDAR);
  1016. DUMP_REG(ECR);
  1017. DUMP_REG(MMFR);
  1018. DUMP_REG(MSCR);
  1019. DUMP_REG(MIBC);
  1020. DUMP_REG(RCR);
  1021. DUMP_REG(TCR);
  1022. DUMP_REG(PALR);
  1023. DUMP_REG(PAUR);
  1024. DUMP_REG(OPD);
  1025. DUMP_REG(TXIC);
  1026. DUMP_REG(RXIC);
  1027. DUMP_REG(IAUR);
  1028. DUMP_REG(IALR);
  1029. DUMP_REG(GAUR);
  1030. DUMP_REG(GALR);
  1031. DUMP_REG(TFWR);
  1032. DUMP_REG(RDSR);
  1033. DUMP_REG(TDSR);
  1034. DUMP_REG(MRBR);
  1035. DUMP_REG(RSFL);
  1036. DUMP_REG(RSEM);
  1037. DUMP_REG(RAEM);
  1038. DUMP_REG(RAFL);
  1039. DUMP_REG(TSEM);
  1040. DUMP_REG(TAEM);
  1041. DUMP_REG(TAFL);
  1042. DUMP_REG(TIPG);
  1043. DUMP_REG(FTRL);
  1044. DUMP_REG(TACC);
  1045. DUMP_REG(RACC);
  1046. // DUMP_REG(RMON_T_DROP);
  1047. DUMP_REG(RMON_T_PACKETS);
  1048. DUMP_REG(RMON_T_BC_PKT);
  1049. DUMP_REG(RMON_T_MC_PKT);
  1050. DUMP_REG(RMON_T_CRC_ALIGN);
  1051. DUMP_REG(RMON_T_UNDERSIZE);
  1052. DUMP_REG(RMON_T_OVERSIZE);
  1053. DUMP_REG(RMON_T_FRAG);
  1054. DUMP_REG(RMON_T_JAB);
  1055. DUMP_REG(RMON_T_COL);
  1056. DUMP_REG(RMON_T_P64);
  1057. DUMP_REG(RMON_T_P65TO127);
  1058. DUMP_REG(RMON_T_P128TO255);
  1059. DUMP_REG(RMON_T_P256TO511);
  1060. DUMP_REG(RMON_T_P512TO1023);
  1061. DUMP_REG(RMON_T_P1024TO2047);
  1062. DUMP_REG(RMON_T_P_GTE2048);
  1063. DUMP_REG(RMON_T_OCTETS);
  1064. // DUMP_REG(IEEE_T_DROP);
  1065. DUMP_REG(IEEE_T_FRAME_OK);
  1066. DUMP_REG(IEEE_T_1COL);
  1067. DUMP_REG(IEEE_T_MCOL);
  1068. DUMP_REG(IEEE_T_DEF);
  1069. DUMP_REG(IEEE_T_LCOL);
  1070. DUMP_REG(IEEE_T_EXCOL);
  1071. DUMP_REG(IEEE_T_MACERR);
  1072. DUMP_REG(IEEE_T_CSERR);
  1073. DUMP_REG(IEEE_T_SQE);
  1074. DUMP_REG(IEEE_T_FDXFC);
  1075. DUMP_REG(IEEE_T_OCTETS_OK);
  1076. DUMP_REG(RMON_R_PACKETS);
  1077. DUMP_REG(RMON_R_BC_PKT);
  1078. DUMP_REG(RMON_R_MC_PKT);
  1079. DUMP_REG(RMON_R_CRC_ALIGN);
  1080. DUMP_REG(RMON_R_UNDERSIZE);
  1081. DUMP_REG(RMON_R_OVERSIZE);
  1082. DUMP_REG(RMON_R_FRAG);
  1083. DUMP_REG(RMON_R_JAB);
  1084. // DUMP_REG(RMON_R_RESVD_0);
  1085. DUMP_REG(RMON_R_P64);
  1086. DUMP_REG(RMON_R_P65TO127);
  1087. DUMP_REG(RMON_R_P128TO255);
  1088. DUMP_REG(RMON_R_P256TO511);
  1089. DUMP_REG(RMON_R_P512TO1023);
  1090. DUMP_REG(RMON_R_P1024TO2047);
  1091. DUMP_REG(RMON_R_P_GTE2048);
  1092. DUMP_REG(RMON_R_OCTETS);
  1093. DUMP_REG(IEEE_R_DROP);
  1094. DUMP_REG(IEEE_R_FRAME_OK);
  1095. DUMP_REG(IEEE_R_CRC);
  1096. DUMP_REG(IEEE_R_ALIGN);
  1097. DUMP_REG(IEEE_R_MACERR);
  1098. DUMP_REG(IEEE_R_FDXFC);
  1099. DUMP_REG(IEEE_R_OCTETS_OK);
  1100. DUMP_REG(ATCR);
  1101. DUMP_REG(ATVR);
  1102. DUMP_REG(ATOFF);
  1103. DUMP_REG(ATPER);
  1104. DUMP_REG(ATCOR);
  1105. DUMP_REG(ATINC);
  1106. DUMP_REG(ATSTMP);
  1107. DUMP_REG(TGSR);
  1108. }
/* Debug helper: software-trigger the ENET interrupt by setting its NVIC
 * pending bit, so the handler runs without real hardware activity. */
void enet_nvic_tog(void)
{
NVIC_SetPendingIRQ(ENET_IRQn);
}
  1113. void enet_rx_stat(void)
  1114. {
  1115. enet_data_error_stats_t *error_statistic = &imxrt_eth_device.error_statistic;
  1116. #define DUMP_STAT(__VAR) \
  1117. rt_kprintf("%-25s: %08X\n", #__VAR, error_statistic->__VAR);
  1118. DUMP_STAT(statsRxLenGreaterErr);
  1119. DUMP_STAT(statsRxAlignErr);
  1120. DUMP_STAT(statsRxFcsErr);
  1121. DUMP_STAT(statsRxOverRunErr);
  1122. DUMP_STAT(statsRxTruncateErr);
  1123. #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
  1124. DUMP_STAT(statsRxProtocolChecksumErr);
  1125. DUMP_STAT(statsRxIpHeadChecksumErr);
  1126. DUMP_STAT(statsRxMacErr);
  1127. DUMP_STAT(statsRxPhyErr);
  1128. DUMP_STAT(statsRxCollisionErr);
  1129. DUMP_STAT(statsTxErr);
  1130. DUMP_STAT(statsTxFrameErr);
  1131. DUMP_STAT(statsTxOverFlowErr);
  1132. DUMP_STAT(statsTxLateCollisionErr);
  1133. DUMP_STAT(statsTxExcessCollisionErr);
  1134. DUMP_STAT(statsTxUnderFlowErr);
  1135. DUMP_STAT(statsTxTsErr);
  1136. #endif
  1137. }
  1138. void enet_buf_info(void)
  1139. {
  1140. int i = 0;
  1141. for (i = 0; i < ENET_RXBD_NUM; i++)
  1142. {
  1143. rt_kprintf("%d: length: %-8d, control: %04X, buffer:%p\n",
  1144. i,
  1145. g_rxBuffDescrip[i].length,
  1146. g_rxBuffDescrip[i].control,
  1147. g_rxBuffDescrip[i].buffer);
  1148. }
  1149. for (i = 0; i < ENET_TXBD_NUM; i++)
  1150. {
  1151. rt_kprintf("%d: length: %-8d, control: %04X, buffer:%p\n",
  1152. i,
  1153. g_txBuffDescrip[i].length,
  1154. g_txBuffDescrip[i].control,
  1155. g_txBuffDescrip[i].buffer);
  1156. }
  1157. }
  1158. FINSH_FUNCTION_EXPORT(phy_read, read phy register);
  1159. FINSH_FUNCTION_EXPORT(phy_write, write phy register);
  1160. FINSH_FUNCTION_EXPORT(phy_dump, dump phy registers);
  1161. FINSH_FUNCTION_EXPORT(enet_reg_dump, dump enet registers);
  1162. FINSH_FUNCTION_EXPORT(enet_nvic_tog, toggle enet nvic pendding bit);
  1163. FINSH_FUNCTION_EXPORT(enet_rx_stat, dump enet rx statistic);
  1164. FINSH_FUNCTION_EXPORT(enet_buf_info, dump enet tx and tx buffer descripter);
  1165. #endif