drv_emac.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-05-16     shelton      first version
 * 2022-07-11     shelton      optimize code to improve network throughput
 *                             performance
 * 2022-10-15     shelton      optimize code
 * 2023-10-18     shelton      optimize code
 * 2024-09-02     shelton      add support phy lan8720 and yt8512
 * 2024-12-18     shelton      add support f457
 */

#include "drv_emac.h"
#include <netif/ethernetif.h>
#include <lwipopts.h>

/* debug option */
//#define EMAC_RX_DUMP
//#define EMAC_TX_DUMP
//#define DRV_DEBUG
#define LOG_TAG                         "drv.emac"
#include <drv_log.h>

#define CRYSTAL_ON_PHY                  0

/* emac memory buffer configuration */
#define EMAC_NUM_RX_BUF                 5   /* rx (5 * 1500) */
#define EMAC_NUM_TX_BUF                 5   /* tx (5 * 1500) */

#define MAX_ADDR_LEN                    6
#define DMARXDESC_FRAMELENGTH_SHIFT     16
struct rt_at32_emac
{
    /* inherit from ethernet device */
    struct eth_device parent;
#ifndef PHY_USING_INTERRUPT_MODE
    rt_timer_t poll_link_timer;
#endif
    /* interface address info, hw address */
    rt_uint8_t dev_addr[MAX_ADDR_LEN];
    /* emac_speed */
    emac_speed_type emac_speed;
    /* emac_duplex_mode */
    emac_duplex_type emac_mode;
};
typedef struct {
    rt_uint32_t length;                 /* frame length in bytes (crc stripped) */
    rt_uint32_t buffer;                 /* address of the first segment buffer */
    emac_dma_desc_type *descriptor;
    emac_dma_desc_type *rx_fs_desc;     /* first-segment rx descriptor */
    emac_dma_desc_type *rx_ls_desc;     /* last-segment rx descriptor */
    rt_uint8_t g_seg_count;             /* number of descriptors used by the frame */
} frame_type;

static emac_dma_desc_type *dma_rx_dscr_tab, *dma_tx_dscr_tab;
extern emac_dma_desc_type *dma_rx_desc_to_get, *dma_tx_desc_to_set;
frame_type rx_frame;
static rt_uint8_t *rx_buff, *tx_buff;
static struct rt_at32_emac at32_emac_device;
static uint8_t phy_addr = 0xFF;
#if defined(EMAC_RX_DUMP) || defined(EMAC_TX_DUMP)
#define __is_print(ch) ((unsigned int)((ch) - ' ') < 127u - ' ')
static void dump_hex(const rt_uint8_t *ptr, rt_size_t buflen)
{
    unsigned char *buf = (unsigned char *)ptr;
    int i, j;

    for (i = 0; i < buflen; i += 16)
    {
        rt_kprintf("%08X: ", i);

        for (j = 0; j < 16; j++)
            if (i + j < buflen)
                rt_kprintf("%02X ", buf[i + j]);
            else
                rt_kprintf("   ");
        rt_kprintf(" ");

        for (j = 0; j < 16; j++)
            if (i + j < buflen)
                rt_kprintf("%c", __is_print(buf[i + j]) ? buf[i + j] : '.');
        rt_kprintf("\n");
    }
}
#endif
/**
 * @brief  phy reset
 */
static void phy_reset(void)
{
    gpio_init_type gpio_init_struct;

#if defined (SOC_SERIES_AT32F437) || defined (SOC_SERIES_AT32F457)
    crm_periph_clock_enable(CRM_GPIOE_PERIPH_CLOCK, TRUE);
    crm_periph_clock_enable(CRM_GPIOG_PERIPH_CLOCK, TRUE);

    gpio_default_para_init(&gpio_init_struct);

    gpio_init_struct.gpio_drive_strength = GPIO_DRIVE_STRENGTH_STRONGER;
    gpio_init_struct.gpio_mode = GPIO_MODE_OUTPUT;
    gpio_init_struct.gpio_out_type = GPIO_OUTPUT_PUSH_PULL;
    gpio_init_struct.gpio_pull = GPIO_PULL_NONE;
    gpio_init_struct.gpio_pins = GPIO_PINS_15;
    gpio_init(GPIOE, &gpio_init_struct);
    gpio_init_struct.gpio_pins = GPIO_PINS_15;
    gpio_init(GPIOG, &gpio_init_struct);

    gpio_bits_reset(GPIOE, GPIO_PINS_15);
    gpio_bits_reset(GPIOG, GPIO_PINS_15);
    rt_thread_mdelay(2);
    gpio_bits_set(GPIOE, GPIO_PINS_15);
#endif
#if defined (SOC_SERIES_AT32F407)
    crm_periph_clock_enable(CRM_GPIOC_PERIPH_CLOCK, TRUE);

    gpio_default_para_init(&gpio_init_struct);

    gpio_init_struct.gpio_drive_strength = GPIO_DRIVE_STRENGTH_STRONGER;
    gpio_init_struct.gpio_mode = GPIO_MODE_OUTPUT;
    gpio_init_struct.gpio_out_type = GPIO_OUTPUT_PUSH_PULL;
    gpio_init_struct.gpio_pull = GPIO_PULL_NONE;
    gpio_init_struct.gpio_pins = GPIO_PINS_8;
    gpio_init(GPIOC, &gpio_init_struct);

    gpio_bits_reset(GPIOC, GPIO_PINS_8);
    rt_thread_mdelay(2);
    gpio_bits_set(GPIOC, GPIO_PINS_8);
#endif
    rt_thread_mdelay(2000);
}
/**
 * @brief  phy clock config
 */
static void phy_clock_config(void)
{
#if (CRYSTAL_ON_PHY == 0)
    /* no crystal on the phy: output the reference clock on mcu pin pa8 */
    gpio_init_type gpio_init_struct;

    crm_periph_clock_enable(CRM_GPIOA_PERIPH_CLOCK, TRUE);

    gpio_default_para_init(&gpio_init_struct);

    gpio_init_struct.gpio_drive_strength = GPIO_DRIVE_STRENGTH_STRONGER;
    gpio_init_struct.gpio_mode = GPIO_MODE_MUX;
    gpio_init_struct.gpio_out_type = GPIO_OUTPUT_PUSH_PULL;
    gpio_init_struct.gpio_pull = GPIO_PULL_NONE;
    gpio_init_struct.gpio_pins = GPIO_PINS_8;
    gpio_init(GPIOA, &gpio_init_struct);

    /* 9162 clkout output 25 mhz */
    /* 83848 clkout output 50 mhz */
#if defined (SOC_SERIES_AT32F407)
    crm_clock_out_set(CRM_CLKOUT_SCLK);
#if defined (PHY_USING_DM9162) || defined (PHY_USING_LAN8720) || \
    defined (PHY_USING_YT8512)
    crm_clkout_div_set(CRM_CLKOUT_DIV_8);
#elif defined (PHY_USING_DP83848)
    crm_clkout_div_set(CRM_CLKOUT_DIV_4);
#endif
#endif
#if defined (SOC_SERIES_AT32F437)
    crm_clock_out1_set(CRM_CLKOUT1_PLL);
#if defined (PHY_USING_DM9162) || defined (PHY_USING_LAN8720) || \
    defined (PHY_USING_YT8512)
    crm_clkout_div_set(CRM_CLKOUT_INDEX_1, CRM_CLKOUT_DIV1_5, CRM_CLKOUT_DIV2_2);
#elif defined (PHY_USING_DP83848)
    crm_clkout_div_set(CRM_CLKOUT_INDEX_1, CRM_CLKOUT_DIV1_5, CRM_CLKOUT_DIV2_1);
#endif
#endif
#endif
}
/**
 * @brief  reset phy register
 */
static error_status emac_phy_register_reset(void)
{
    uint16_t data = 0;
    uint32_t timeout = 0;
    uint32_t i = 0;

    if(emac_phy_register_write(phy_addr, PHY_CONTROL_REG, PHY_RESET_BIT) == ERROR)
    {
        return ERROR;
    }

    for(i = 0; i < 0x000FFFFF; i++);

    do
    {
        timeout++;
        if(emac_phy_register_read(phy_addr, PHY_CONTROL_REG, &data) == ERROR)
        {
            return ERROR;
        }
    } while((data & PHY_RESET_BIT) && (timeout < PHY_TIMEOUT));

    for(i = 0; i < 0x00FFFFF; i++);

    if(timeout == PHY_TIMEOUT)
    {
        return ERROR;
    }
    return SUCCESS;
}
/**
 * @brief  set mac speed related parameters
 */
static error_status emac_speed_config(emac_auto_negotiation_type nego, emac_duplex_type mode, emac_speed_type speed)
{
    uint16_t data = 0;
    uint32_t timeout = 0;

    if(nego == EMAC_AUTO_NEGOTIATION_ON)
    {
        do
        {
            timeout++;
            if(emac_phy_register_read(phy_addr, PHY_STATUS_REG, &data) == ERROR)
            {
                return ERROR;
            }
        } while(!(data & PHY_LINKED_STATUS_BIT) && (timeout < PHY_TIMEOUT));

        if(timeout == PHY_TIMEOUT)
        {
            return ERROR;
        }
        timeout = 0;

        if(emac_phy_register_write(phy_addr, PHY_CONTROL_REG, PHY_AUTO_NEGOTIATION_BIT) == ERROR)
        {
            return ERROR;
        }

        do
        {
            timeout++;
            if(emac_phy_register_read(phy_addr, PHY_STATUS_REG, &data) == ERROR)
            {
                return ERROR;
            }
        } while(!(data & PHY_NEGO_COMPLETE_BIT) && (timeout < PHY_TIMEOUT));

        if(timeout == PHY_TIMEOUT)
        {
            return ERROR;
        }

        if(emac_phy_register_read(phy_addr, PHY_SPECIFIED_CS_REG, &data) == ERROR)
        {
            return ERROR;
        }
#if defined (PHY_USING_DM9162) || defined (PHY_USING_LAN8720)
        if(data & PHY_FULL_DUPLEX_100MBPS_BIT)
        {
            emac_fast_speed_set(EMAC_SPEED_100MBPS);
            emac_duplex_mode_set(EMAC_FULL_DUPLEX);
        }
        else if(data & PHY_HALF_DUPLEX_100MBPS_BIT)
        {
            emac_fast_speed_set(EMAC_SPEED_100MBPS);
            emac_duplex_mode_set(EMAC_HALF_DUPLEX);
        }
        else if(data & PHY_FULL_DUPLEX_10MBPS_BIT)
        {
            emac_fast_speed_set(EMAC_SPEED_10MBPS);
            emac_duplex_mode_set(EMAC_FULL_DUPLEX);
        }
        else if(data & PHY_HALF_DUPLEX_10MBPS_BIT)
        {
            emac_fast_speed_set(EMAC_SPEED_10MBPS);
            emac_duplex_mode_set(EMAC_HALF_DUPLEX);
        }
#endif
#if defined (PHY_USING_DP83848)
        if(data & PHY_DUPLEX_MODE)
        {
            emac_duplex_mode_set(EMAC_FULL_DUPLEX);
        }
        else
        {
            emac_duplex_mode_set(EMAC_HALF_DUPLEX);
        }

        if(data & PHY_SPEED_MODE)
        {
            emac_fast_speed_set(EMAC_SPEED_10MBPS);
        }
        else
        {
            emac_fast_speed_set(EMAC_SPEED_100MBPS);
        }
#endif
#if defined (PHY_USING_YT8512)
        if(data & PHY_DUPLEX_MODE)
        {
            emac_duplex_mode_set(EMAC_FULL_DUPLEX);
        }
        else
        {
            emac_duplex_mode_set(EMAC_HALF_DUPLEX);
        }

        if(data & PHY_SPEED_MODE)
        {
            emac_fast_speed_set(EMAC_SPEED_100MBPS);
        }
        else
        {
            emac_fast_speed_set(EMAC_SPEED_10MBPS);
        }
#endif
    }
    else
    {
        if(emac_phy_register_write(phy_addr, PHY_CONTROL_REG, (uint16_t)((mode << 8) | (speed << 13))) == ERROR)
        {
            return ERROR;
        }

        if(speed == EMAC_SPEED_100MBPS)
        {
            emac_fast_speed_set(EMAC_SPEED_100MBPS);
        }
        else
        {
            emac_fast_speed_set(EMAC_SPEED_10MBPS);
        }

        if(mode == EMAC_FULL_DUPLEX)
        {
            emac_duplex_mode_set(EMAC_FULL_DUPLEX);
        }
        else
        {
            emac_duplex_mode_set(EMAC_HALF_DUPLEX);
        }
    }
    return SUCCESS;
}
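/*
 * usage sketch (not called elsewhere in this file, and assuming the library's
 * emac_auto_negotiation_type also provides an "off" value, named
 * EMAC_AUTO_NEGOTIATION_OFF here for illustration): with auto-negotiation
 * disabled the caller fixes the link itself, e.g.
 *     emac_speed_config(EMAC_AUTO_NEGOTIATION_OFF, EMAC_FULL_DUPLEX, EMAC_SPEED_100MBPS);
 * which writes the duplex/speed bits into PHY_CONTROL_REG and mirrors the same
 * settings into the mac via emac_duplex_mode_set()/emac_fast_speed_set().
 */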
/**
 * @brief  initialize emac phy
 */
static error_status emac_phy_init(emac_control_config_type *control_para)
{
    emac_clock_range_set();

    if(emac_phy_register_reset() == ERROR)
    {
        return ERROR;
    }

    if(emac_speed_config(control_para->auto_nego, control_para->duplex_mode, control_para->fast_ethernet_speed) == ERROR)
    {
        return ERROR;
    }

    emac_control_config(control_para);
    return SUCCESS;
}
/**
 * @brief  emac initialization function
 */
static rt_err_t rt_at32_emac_init(rt_device_t dev)
{
    emac_control_config_type mac_control_para;
    emac_dma_config_type dma_control_para;

    /* wait until a phy has been detected */
    while(phy_addr == 0xFF)
    {
        rt_thread_mdelay(1000);
    }

    /* emac reset */
    emac_reset();

    /* software reset emac dma */
    emac_dma_software_reset_set();
    while(emac_dma_software_reset_get() == SET);

    emac_control_para_init(&mac_control_para);
    mac_control_para.auto_nego = EMAC_AUTO_NEGOTIATION_ON;

    if(emac_phy_init(&mac_control_para) == ERROR)
    {
        LOG_E("emac hardware init failed");
        return -RT_ERROR;
    }
    else
    {
        LOG_D("emac hardware init success");
    }

    emac_transmit_flow_control_enable(TRUE);
    emac_zero_quanta_pause_disable(TRUE);

    /* set mac address */
    emac_local_address_set(at32_emac_device.dev_addr);

    /* set emac dma rx link list */
    emac_dma_descriptor_list_address_set(EMAC_DMA_RECEIVE, dma_rx_dscr_tab, rx_buff, EMAC_NUM_RX_BUF);
    /* set emac dma tx link list */
    emac_dma_descriptor_list_address_set(EMAC_DMA_TRANSMIT, dma_tx_dscr_tab, tx_buff, EMAC_NUM_TX_BUF);

    emac_dma_para_init(&dma_control_para);
    dma_control_para.rsf_enable = TRUE;
    dma_control_para.tsf_enable = TRUE;
    dma_control_para.osf_enable = TRUE;
    dma_control_para.aab_enable = TRUE;
    dma_control_para.usp_enable = TRUE;
    dma_control_para.fb_enable = TRUE;
    dma_control_para.flush_rx_disable = TRUE;
    dma_control_para.rx_dma_pal = EMAC_DMA_PBL_32;
    dma_control_para.tx_dma_pal = EMAC_DMA_PBL_32;
    dma_control_para.priority_ratio = EMAC_DMA_2_RX_1_TX;
    emac_dma_config(&dma_control_para);

    /* emac interrupt init */
    emac_dma_interrupt_enable(EMAC_DMA_INTERRUPT_NORMAL_SUMMARY, TRUE);
    emac_dma_interrupt_enable(EMAC_DMA_INTERRUPT_RX, TRUE);
    nvic_irq_enable(EMAC_IRQn, 0x07, 0);

    /* enable emac */
    emac_start();

    return RT_EOK;
}
static rt_err_t rt_at32_emac_open(rt_device_t dev, rt_uint16_t oflag)
{
    LOG_D("emac open");
    return RT_EOK;
}

static rt_err_t rt_at32_emac_close(rt_device_t dev)
{
    LOG_D("emac close");
    return RT_EOK;
}

static rt_ssize_t rt_at32_emac_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    LOG_D("emac read");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}

static rt_ssize_t rt_at32_emac_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
    LOG_D("emac write");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}
static rt_err_t rt_at32_emac_control(rt_device_t dev, int cmd, void *args)
{
    switch (cmd)
    {
    case NIOCTL_GADDR:
        /* get mac address */
        if (args)
        {
            SMEMCPY(args, at32_emac_device.dev_addr, 6);
        }
        else
        {
            return -RT_ERROR;
        }
        break;
    default :
        break;
    }

    return RT_EOK;
}
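/*
 * note: NIOCTL_GADDR is issued by rt-thread's lwip ethernetif glue when the
 * interface is initialized; the 6-byte mac address copied out here is what
 * ends up in netif->hwaddr for the "e0" device registered below.
 */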
/**
 * @brief  emac txpkt chainmode
 */
rt_err_t emac_txpkt_chainmode(rt_uint32_t frame_length)
{
    rt_uint32_t buf_cnt = 0, index = 0;

    /* check if the descriptor is owned by the ethernet dma (when set) or cpu (when reset) */
    if((dma_tx_desc_to_set->status & EMAC_DMATXDESC_OWN) != (u32)RESET)
    {
        /* return error: own bit set */
        return -RT_ERROR;
    }

    if(frame_length == 0)
    {
        return -RT_ERROR;
    }

    if(frame_length > EMAC_MAX_PACKET_LENGTH)
    {
        buf_cnt = frame_length / EMAC_MAX_PACKET_LENGTH;
        if(frame_length % EMAC_MAX_PACKET_LENGTH)
        {
            buf_cnt += 1;
        }
    }
    else
    {
        buf_cnt = 1;
    }

    if(buf_cnt == 1)
    {
        /* set the last segment and first segment bits (the frame is transmitted in one descriptor) */
        dma_tx_desc_to_set->status |= EMAC_DMATXDESC_LS | EMAC_DMATXDESC_FS;

        /* set the frame length: bits[12:0] */
        dma_tx_desc_to_set->controlsize = (frame_length & EMAC_DMATXDESC_TBS1);

        /* set own bit of the tx descriptor status: gives the buffer back to ethernet dma */
        dma_tx_desc_to_set->status |= EMAC_DMATXDESC_OWN;

        /* select the next dma tx descriptor for the next buffer to send */
        dma_tx_desc_to_set = (emac_dma_desc_type*) (dma_tx_desc_to_set->buf2nextdescaddr);
    }
    else
    {
        for(index = 0; index < buf_cnt; index ++)
        {
            /* clear first and last segment bits */
            dma_tx_desc_to_set->status &= ~(EMAC_DMATXDESC_LS | EMAC_DMATXDESC_FS);

            /* set the first segment bit on the first descriptor */
            if(index == 0)
            {
                dma_tx_desc_to_set->status |= EMAC_DMATXDESC_FS;
            }

            /* set size */
            dma_tx_desc_to_set->controlsize = (EMAC_MAX_PACKET_LENGTH & EMAC_DMATXDESC_TBS1);

            /* set the last segment bit and the remaining size on the last descriptor */
            if(index == (buf_cnt - 1))
            {
                dma_tx_desc_to_set->status |= EMAC_DMATXDESC_LS;
                dma_tx_desc_to_set->controlsize = ((frame_length - ((buf_cnt - 1) * EMAC_MAX_PACKET_LENGTH)) & EMAC_DMATXDESC_TBS1);
            }

            /* set own bit of the tx descriptor status: gives the buffer back to ethernet dma */
            dma_tx_desc_to_set->status |= EMAC_DMATXDESC_OWN;

            /* select the next dma tx descriptor for the next buffer to send */
            dma_tx_desc_to_set = (emac_dma_desc_type*) (dma_tx_desc_to_set->buf2nextdescaddr);
        }
    }

    /* when tx buffer unavailable flag is set: clear it and resume transmission */
    if(emac_dma_flag_get(EMAC_DMA_TBU_FLAG))
    {
        /* clear tbus ethernet dma flag */
        emac_dma_flag_clear(EMAC_DMA_TBU_FLAG);
        /* resume dma transmission */
        EMAC_DMA->tpd_bit.tpd = 0;
    }

    return RT_EOK;
}
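/*
 * descriptor handshake used above: the cpu fills buf1addr, sets the FS/LS
 * flags and buffer size, then hands the descriptor to the dma by setting the
 * OWN bit; the dma clears OWN once the segment has been sent. if the dma had
 * stalled because no descriptor was available (TBU flag), writing the tpd
 * register kicks transmission again.
 */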
/**
 * @brief  transmit data
 */
rt_err_t rt_at32_emac_tx(rt_device_t dev, struct pbuf *p)
{
    rt_err_t ret = -RT_ERROR;
    struct pbuf *q;
    rt_uint32_t length = 0;
    rt_uint32_t buffer_offset = 0, payload_offset = 0, copy_count = 0;
    emac_dma_desc_type *dma_tx_desc;
    rt_uint8_t *buffer;

    dma_tx_desc = dma_tx_desc_to_set;
    buffer = (uint8_t *)(dma_tx_desc_to_set->buf1addr);

    /* copy data to buffer */
    for(q = p; q != NULL; q = q->next)
    {
        if((dma_tx_desc->status & EMAC_DMATXDESC_OWN) != RESET)
        {
            ret = RT_EOK;
            goto _error;
        }

        copy_count = q->len;
        payload_offset = 0;

        while((copy_count + buffer_offset) > EMAC_MAX_PACKET_LENGTH)
        {
            rt_memcpy(buffer + buffer_offset, (uint8_t *)q->payload + payload_offset, (EMAC_MAX_PACKET_LENGTH - buffer_offset));

            dma_tx_desc = (emac_dma_desc_type *)dma_tx_desc->buf2nextdescaddr;
            if((dma_tx_desc->status & EMAC_DMATXDESC_OWN) != RESET)
            {
                ret = RT_EOK;
                goto _error;
            }

            buffer = (uint8_t *)dma_tx_desc->buf1addr;
            copy_count = copy_count - (EMAC_MAX_PACKET_LENGTH - buffer_offset);
            payload_offset = payload_offset + (EMAC_MAX_PACKET_LENGTH - buffer_offset);
            length = length + (EMAC_MAX_PACKET_LENGTH - buffer_offset);
            buffer_offset = 0;
        }

        rt_memcpy(buffer + buffer_offset, (uint8_t *)q->payload + payload_offset, copy_count);
        buffer_offset = buffer_offset + copy_count;
        length = length + copy_count;
    }

    emac_txpkt_chainmode(length);
    ret = RT_EOK;

_error:
    /* when tx buffer unavailable flag is set: clear it and resume transmission */
    if(emac_dma_flag_get(EMAC_DMA_TBU_FLAG))
    {
        /* clear tbus ethernet dma flag */
        emac_dma_flag_clear(EMAC_DMA_TBU_FLAG);
        /* resume dma transmission */
        EMAC_DMA->tpd_bit.tpd = 0;
    }
    return ret;
}
/**
 * @brief  emac rxpkt chainmode
 */
rt_err_t emac_rxpkt_chainmode(void)
{
    /* check if the descriptor is owned by the ethernet dma (when set) or cpu (when reset) */
    if((dma_rx_desc_to_get->status & EMAC_DMARXDESC_OWN) != (u32)RESET)
    {
        /* return error: own bit set */
        return -RT_ERROR;
    }

    if((dma_rx_desc_to_get->status & EMAC_DMARXDESC_LS) != (u32)RESET)
    {
        rx_frame.g_seg_count ++;
        if(rx_frame.g_seg_count == 1)
        {
            rx_frame.rx_fs_desc = dma_rx_desc_to_get;
        }
        rx_frame.rx_ls_desc = dma_rx_desc_to_get;
        rx_frame.length = ((dma_rx_desc_to_get->status & EMAC_DMARXDESC_FL) >> DMARXDESC_FRAMELENGTH_SHIFT) - 4;
        rx_frame.buffer = rx_frame.rx_fs_desc->buf1addr;

        /* select the next dma rx descriptor for the next buffer to read */
        dma_rx_desc_to_get = (emac_dma_desc_type*) (dma_rx_desc_to_get->buf2nextdescaddr);

        return RT_EOK;
    }
    else if((dma_rx_desc_to_get->status & EMAC_DMARXDESC_FS) != (u32)RESET)
    {
        rx_frame.g_seg_count = 1;
        rx_frame.rx_fs_desc = dma_rx_desc_to_get;
        rx_frame.rx_ls_desc = NULL;
        dma_rx_desc_to_get = (emac_dma_desc_type*) (dma_rx_desc_to_get->buf2nextdescaddr);
    }
    else
    {
        rx_frame.g_seg_count ++;
        dma_rx_desc_to_get = (emac_dma_desc_type*) (dma_rx_desc_to_get->buf2nextdescaddr);
    }

    return -RT_ERROR;
}
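/*
 * rx segment assembly used above: a frame may span several descriptors, so a
 * descriptor carrying the FS flag starts rx_frame, intermediate descriptors
 * only bump g_seg_count, and the one carrying LS completes the frame; the
 * frame length comes from the FL field of the last descriptor minus the
 * 4-byte crc. only when LS has been seen does this return RT_EOK and
 * rt_at32_emac_rx() copy the data out.
 */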
/**
 * @brief  receive data
 */
struct pbuf *rt_at32_emac_rx(rt_device_t dev)
{
    struct pbuf *p = NULL;
    struct pbuf *q = NULL;
    rt_uint16_t len = 0;
    rt_uint8_t *buffer;
    emac_dma_desc_type *dma_rx_desc;
    rt_uint32_t buffer_offset, payload_offset = 0, copy_count = 0;
    rt_uint32_t index = 0;

    if(emac_rxpkt_chainmode() != RT_EOK)
    {
        return NULL;
    }

    /* obtain the size of the packet and put it into the "len" variable. */
    len = rx_frame.length;
    buffer = (uint8_t *)rx_frame.buffer;

    /* we allocate a pbuf chain of pbufs from the pool. */
    if(len > 0)
    {
        p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);
    }

    if(p != NULL)
    {
        dma_rx_desc = rx_frame.rx_fs_desc;
        buffer_offset = 0;
        for (q = p; q != NULL; q = q->next)
        {
            copy_count = q->len;
            payload_offset = 0;

            while((copy_count + buffer_offset) > EMAC_MAX_PACKET_LENGTH)
            {
                /* copy data to pbuf */
                rt_memcpy((uint8_t *)q->payload + payload_offset, buffer + buffer_offset, (EMAC_MAX_PACKET_LENGTH - buffer_offset));

                /* point to next descriptor */
                dma_rx_desc = (emac_dma_desc_type *)(dma_rx_desc->buf2nextdescaddr);
                buffer = (uint8_t *)(dma_rx_desc->buf1addr);

                copy_count = copy_count - (EMAC_MAX_PACKET_LENGTH - buffer_offset);
                payload_offset = payload_offset + (EMAC_MAX_PACKET_LENGTH - buffer_offset);
                buffer_offset = 0;
            }

            rt_memcpy((uint8_t *)q->payload + payload_offset, (uint8_t *)buffer + buffer_offset, copy_count);
            buffer_offset = buffer_offset + copy_count;
        }
    }

    /* give the descriptors used by this frame back to the dma */
    dma_rx_desc = rx_frame.rx_fs_desc;
    for(index = 0; index < rx_frame.g_seg_count; index ++)
    {
        dma_rx_desc->status |= EMAC_DMARXDESC_OWN;
        dma_rx_desc = (emac_dma_desc_type*) (dma_rx_desc->buf2nextdescaddr);
    }
    rx_frame.g_seg_count = 0;

    /* when rx buffer unavailable flag is set: clear it and resume reception */
    if(emac_dma_flag_get(EMAC_DMA_RBU_FLAG))
    {
        /* clear rbus ethernet dma flag */
        emac_dma_flag_clear(EMAC_DMA_RBU_FLAG);
        /* resume dma reception */
        EMAC_DMA->rpd_bit.rpd = FALSE;
    }

    return p;
}
void EMAC_IRQHandler(void)
{
    /* enter interrupt */
    rt_interrupt_enter();

    /* packet reception */
    if (emac_dma_flag_get(EMAC_DMA_RI_FLAG) == SET)
    {
        /* a frame has been received */
        eth_device_ready(&(at32_emac_device.parent));
        emac_dma_flag_clear(EMAC_DMA_RI_FLAG);
    }

    /* packet transmission */
    if (emac_dma_flag_get(EMAC_DMA_TI_FLAG) == SET)
    {
        emac_dma_flag_clear(EMAC_DMA_TI_FLAG);
    }

    /* clear normal interrupt summary */
    emac_dma_flag_clear(EMAC_DMA_NIS_FLAG);

    /* clear dma error flags */
    if(emac_dma_flag_get(EMAC_DMA_AIS_FLAG) != RESET)
    {
        if(emac_dma_flag_get(EMAC_DMA_RBU_FLAG) != RESET)
        {
            emac_dma_flag_clear(EMAC_DMA_RBU_FLAG);
        }
        if(emac_dma_flag_get(EMAC_DMA_OVF_FLAG) != RESET)
        {
            emac_dma_flag_clear(EMAC_DMA_OVF_FLAG);
        }
        emac_dma_flag_clear(EMAC_DMA_AIS_FLAG);
    }

    /* leave interrupt */
    rt_interrupt_leave();
}
enum {
    PHY_LINK        = (1 << 0),
    PHY_10M         = (1 << 1),
    PHY_FULLDUPLEX  = (1 << 2),
};

static void phy_linkchange()
{
    static rt_uint8_t phy_speed = 0;
    rt_uint8_t phy_speed_new = 0;
    rt_uint16_t status;

    emac_phy_register_read(phy_addr, PHY_BASIC_STATUS_REG, (uint16_t *)&status);
    LOG_D("phy basic status reg is 0x%X", status);

    if (status & (PHY_AUTONEGO_COMPLETE_MASK | PHY_LINKED_STATUS_MASK))
    {
        rt_uint16_t SR = 0;

        phy_speed_new |= PHY_LINK;

        emac_phy_register_read(phy_addr, PHY_SPECIFIED_CS_REG, (uint16_t *)&SR);
        LOG_D("phy control status reg is 0x%X", SR);

        if (SR & (PHY_SPEED_MODE))
        {
#if defined (PHY_USING_DP83848)
            phy_speed_new |= PHY_10M;
#endif
        }

        if (SR & (PHY_DUPLEX_MODE))
        {
            phy_speed_new |= PHY_FULLDUPLEX;
        }
    }

    if (phy_speed != phy_speed_new)
    {
        phy_speed = phy_speed_new;
        if (phy_speed & PHY_LINK)
        {
            LOG_D("link up");
            if (phy_speed & PHY_10M)
            {
                LOG_D("10Mbps");
                at32_emac_device.emac_speed = EMAC_SPEED_10MBPS;
            }
            else
            {
                at32_emac_device.emac_speed = EMAC_SPEED_100MBPS;
                LOG_D("100Mbps");
            }

            if (phy_speed & PHY_FULLDUPLEX)
            {
                LOG_D("full-duplex");
                at32_emac_device.emac_mode = EMAC_FULL_DUPLEX;
            }
            else
            {
                LOG_D("half-duplex");
                at32_emac_device.emac_mode = EMAC_HALF_DUPLEX;
            }

            /* send link up. */
            eth_device_linkchange(&at32_emac_device.parent, RT_TRUE);
        }
        else
        {
            LOG_I("link down");
            eth_device_linkchange(&at32_emac_device.parent, RT_FALSE);
        }
    }
}
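/*
 * link monitoring: with PHY_USING_INTERRUPT_MODE the phy interrupt pin calls
 * emac_phy_isr() below and phy_linkchange() reads the new state; otherwise a
 * periodic "phylnk" rt_timer created in phy_monitor_thread_entry() polls
 * phy_linkchange() once per second.
 */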
#ifdef PHY_USING_INTERRUPT_MODE
static void emac_phy_isr(void *args)
{
    rt_uint32_t status = 0;

    emac_phy_register_read(phy_addr, PHY_INTERRUPT_FLAG_REG, (uint16_t *)&status);
    LOG_D("phy interrupt status reg is 0x%X", status);

    phy_linkchange();
}
#endif /* PHY_USING_INTERRUPT_MODE */
static void phy_monitor_thread_entry(void *parameter)
{
    uint8_t detected_count = 0;

    while(phy_addr == 0xFF)
    {
        /* phy search */
        rt_uint32_t i, temp;
        for (i = 0; i <= 0x1F; i++)
        {
            emac_phy_register_read(i, PHY_BASIC_STATUS_REG, (uint16_t *)&temp);

            if (temp != 0xFFFF && temp != 0x00)
            {
                phy_addr = i;
                break;
            }
        }

        detected_count++;
        rt_thread_mdelay(1000);

        if (detected_count > 10)
        {
            LOG_E("No PHY device was detected, please check hardware!");
        }
    }

    LOG_D("Found a phy, address:0x%02X", phy_addr);

    /* reset phy */
    LOG_D("RESET PHY!");
    emac_phy_register_write(phy_addr, PHY_BASIC_CONTROL_REG, PHY_RESET_MASK);
    rt_thread_mdelay(2000);
    emac_phy_register_write(phy_addr, PHY_BASIC_CONTROL_REG, PHY_AUTO_NEGOTIATION_MASK);

    phy_linkchange();

#ifdef PHY_USING_INTERRUPT_MODE
    /* configure interrupt pin */
    rt_pin_mode(PHY_INT_PIN, PIN_MODE_INPUT_PULLUP);
    rt_pin_attach_irq(PHY_INT_PIN, PIN_IRQ_MODE_FALLING, emac_phy_isr, (void *)"callbackargs");
    rt_pin_irq_enable(PHY_INT_PIN, PIN_IRQ_ENABLE);

    /* enable phy interrupt */
    emac_phy_register_write(phy_addr, PHY_INTERRUPT_MASK_REG, PHY_INT_MASK);
#if defined(PHY_INTERRUPT_CTRL_REG)
    emac_phy_register_write(phy_addr, PHY_INTERRUPT_CTRL_REG, PHY_INTERRUPT_EN);
#endif
#else /* PHY_USING_INTERRUPT_MODE */
    at32_emac_device.poll_link_timer = rt_timer_create("phylnk", (void (*)(void *))phy_linkchange,
                                                       NULL, RT_TICK_PER_SECOND, RT_TIMER_FLAG_PERIODIC);
    if (!at32_emac_device.poll_link_timer || rt_timer_start(at32_emac_device.poll_link_timer) != RT_EOK)
    {
        LOG_E("Start link change detection timer failed");
    }
#endif /* PHY_USING_INTERRUPT_MODE */
}
/* Register the EMAC device */
static int rt_hw_at32_emac_init(void)
{
    rt_err_t state = RT_EOK;

    /* prepare receive and send buffers */
    rx_buff = (rt_uint8_t *)rt_calloc(EMAC_NUM_RX_BUF, EMAC_MAX_PACKET_LENGTH);
    if (rx_buff == RT_NULL)
    {
        LOG_E("No memory");
        state = -RT_ENOMEM;
        goto __exit;
    }

    tx_buff = (rt_uint8_t *)rt_calloc(EMAC_NUM_TX_BUF, EMAC_MAX_PACKET_LENGTH);
    if (tx_buff == RT_NULL)
    {
        LOG_E("No memory");
        state = -RT_ENOMEM;
        goto __exit;
    }

    dma_rx_dscr_tab = (emac_dma_desc_type *)rt_calloc(EMAC_NUM_RX_BUF, sizeof(emac_dma_desc_type));
    if (dma_rx_dscr_tab == RT_NULL)
    {
        LOG_E("No memory");
        state = -RT_ENOMEM;
        goto __exit;
    }

    dma_tx_dscr_tab = (emac_dma_desc_type *)rt_calloc(EMAC_NUM_TX_BUF, sizeof(emac_dma_desc_type));
    if (dma_tx_dscr_tab == RT_NULL)
    {
        LOG_E("No memory");
        state = -RT_ENOMEM;
        goto __exit;
    }

    /* phy clock */
    phy_clock_config();

    /* enable periph clock */
    crm_periph_clock_enable(CRM_EMAC_PERIPH_CLOCK, TRUE);
    crm_periph_clock_enable(CRM_EMACTX_PERIPH_CLOCK, TRUE);
    crm_periph_clock_enable(CRM_EMACRX_PERIPH_CLOCK, TRUE);

    /* interface mode */
#if defined (SOC_SERIES_AT32F407)
    gpio_pin_remap_config(MII_RMII_SEL_GMUX, TRUE);
#endif
#if defined (SOC_SERIES_AT32F437) || defined (SOC_SERIES_AT32F457)
    scfg_emac_interface_set(SCFG_EMAC_SELECT_RMII);
#endif

    /* emac gpio init */
    at32_msp_emac_init(NULL);

    at32_emac_device.emac_speed = EMAC_SPEED_100MBPS;
    at32_emac_device.emac_mode = EMAC_FULL_DUPLEX;

    at32_emac_device.dev_addr[0] = 0x00;
    at32_emac_device.dev_addr[1] = 0x66;
    at32_emac_device.dev_addr[2] = 0x88;
    /* generate mac addr from unique id (only for test). */
    at32_emac_device.dev_addr[3] = *(rt_uint8_t *)(0x1FFFF7E8 + 4);
    at32_emac_device.dev_addr[4] = *(rt_uint8_t *)(0x1FFFF7E8 + 2);
    at32_emac_device.dev_addr[5] = *(rt_uint8_t *)(0x1FFFF7E8 + 0);

    at32_emac_device.parent.parent.init      = rt_at32_emac_init;
    at32_emac_device.parent.parent.open      = rt_at32_emac_open;
    at32_emac_device.parent.parent.close     = rt_at32_emac_close;
    at32_emac_device.parent.parent.read      = rt_at32_emac_read;
    at32_emac_device.parent.parent.write     = rt_at32_emac_write;
    at32_emac_device.parent.parent.control   = rt_at32_emac_control;
    at32_emac_device.parent.parent.user_data = RT_NULL;

    at32_emac_device.parent.eth_rx = rt_at32_emac_rx;
    at32_emac_device.parent.eth_tx = rt_at32_emac_tx;

    rx_frame.g_seg_count = 0;

    /* reset phy */
    phy_reset();

    /* start phy monitor */
    rt_thread_t tid;
    tid = rt_thread_create("phy",
                           phy_monitor_thread_entry,
                           RT_NULL,
                           1024,
                           RT_THREAD_PRIORITY_MAX - 2,
                           2);
    if (tid != RT_NULL)
    {
        rt_thread_startup(tid);
    }
    else
    {
        state = -RT_ERROR;
    }

    /* register eth device */
    state = eth_device_init(&(at32_emac_device.parent), "e0");
    if (RT_EOK == state)
    {
        LOG_D("emac device init success");
    }
    else
    {
        LOG_E("emac device init failed: %d", state);
        state = -RT_ERROR;
        goto __exit;
    }

__exit:
    if (state != RT_EOK)
    {
        if (rx_buff)
        {
            rt_free(rx_buff);
        }

        if (tx_buff)
        {
            rt_free(tx_buff);
        }

        if (dma_rx_dscr_tab)
        {
            rt_free(dma_rx_dscr_tab);
        }

        if (dma_tx_dscr_tab)
        {
            rt_free(dma_tx_dscr_tab);
        }
    }

    return state;
}
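/*
 * rt_hw_at32_emac_init() is hooked into rt-thread's automatic initialization
 * below: INIT_DEVICE_EXPORT runs it at the device-init stage during startup,
 * after which lwip drives the "e0" interface through the eth_rx/eth_tx hooks
 * registered above.
 */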
INIT_DEVICE_EXPORT(rt_hw_at32_emac_init);