dm_qe_uec.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * QE UEC ethernet controller driver
 *
 * based on drivers/qe/uec.c from NXP
 *
 * Copyright (C) 2020 Heiko Schocher <hs@denx.de>
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <miiphy.h>
#include <asm/global_data.h>
#include <asm/io.h>

#include "dm_qe_uec.h"

#define QE_UEC_DRIVER_NAME "ucc_geth"

/* Default UTBIPAR SMI address */
#ifndef CONFIG_UTBIPAR_INIT_TBIPA
#define CONFIG_UTBIPAR_INIT_TBIPA 0x1F
#endif

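/*
 * MACCFG1 gates the MAC transmitter and receiver independently. The two
 * helpers below set or clear MACCFG1_ENABLE_TX/RX and mirror the result
 * in uec->mac_tx_enabled/mac_rx_enabled.
 */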
static int uec_mac_enable(struct uec_priv *uec, comm_dir_e mode)
{
    uec_t *uec_regs;
    u32 maccfg1;

    uec_regs = uec->uec_regs;
    maccfg1 = in_be32(&uec_regs->maccfg1);

    if (mode & COMM_DIR_TX) {
        maccfg1 |= MACCFG1_ENABLE_TX;
        out_be32(&uec_regs->maccfg1, maccfg1);
        uec->mac_tx_enabled = 1;
    }

    if (mode & COMM_DIR_RX) {
        maccfg1 |= MACCFG1_ENABLE_RX;
        out_be32(&uec_regs->maccfg1, maccfg1);
        uec->mac_rx_enabled = 1;
    }

    return 0;
}

static int uec_mac_disable(struct uec_priv *uec, comm_dir_e mode)
{
    uec_t *uec_regs;
    u32 maccfg1;

    uec_regs = uec->uec_regs;
    maccfg1 = in_be32(&uec_regs->maccfg1);

    if (mode & COMM_DIR_TX) {
        maccfg1 &= ~MACCFG1_ENABLE_TX;
        out_be32(&uec_regs->maccfg1, maccfg1);
        uec->mac_tx_enabled = 0;
    }

    if (mode & COMM_DIR_RX) {
        maccfg1 &= ~MACCFG1_ENABLE_RX;
        out_be32(&uec_regs->maccfg1, maccfg1);
        uec->mac_rx_enabled = 0;
    }

    return 0;
}

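/*
 * Restart a gracefully stopped channel: issue the QE_RESTART_TX/RX host
 * command to this UCC's CECR sub-block and clear the grace_stopped_*
 * flag checked by uec_open().
 */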
static int uec_restart_tx(struct uec_priv *uec)
{
    struct uec_inf *ui = uec->uec_info;
    u32 cecr_subblock;

    cecr_subblock = ucc_fast_get_qe_cr_subblock(ui->uf_info.ucc_num);
    qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
                 (u8)QE_CR_PROTOCOL_ETHERNET, 0);

    uec->grace_stopped_tx = 0;

    return 0;
}

static int uec_restart_rx(struct uec_priv *uec)
{
    struct uec_inf *ui = uec->uec_info;
    u32 cecr_subblock;

    cecr_subblock = ucc_fast_get_qe_cr_subblock(ui->uf_info.ucc_num);
    qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
                 (u8)QE_CR_PROTOCOL_ETHERNET, 0);

    uec->grace_stopped_rx = 0;

    return 0;
}

static int uec_open(struct uec_priv *uec, comm_dir_e mode)
{
    struct ucc_fast_priv *uccf;

    uccf = uec->uccf;

    /* check if the UCC number is in range. */
    if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
        printf("%s: ucc_num out of range.\n", __func__);
        return -EINVAL;
    }

    /* Enable MAC */
    uec_mac_enable(uec, mode);

    /* Enable UCC fast */
    ucc_fast_enable(uccf, mode);

    /* RISC microcode start */
    if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx)
        uec_restart_tx(uec);
    if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx)
        uec_restart_rx(uec);

    return 0;
}

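/*
 * Program MACCFG2 (nibble vs. byte interface mode) and UPSMR (reduced
 * pin mode, TBI, 10 Mbps and RMII flags) to match the negotiated speed
 * and the configured PHY interface type; unsupported combinations
 * return -EINVAL.
 */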
static int uec_set_mac_if_mode(struct uec_priv *uec)
{
    struct uec_inf *uec_info = uec->uec_info;
    phy_interface_t enet_if_mode;
    uec_t *uec_regs;
    u32 upsmr;
    u32 maccfg2;

    uec_regs = uec->uec_regs;
    enet_if_mode = uec_info->enet_interface_type;

    maccfg2 = in_be32(&uec_regs->maccfg2);
    maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;

    upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
    upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);

    switch (uec_info->speed) {
    case SPEED_10:
        maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
        switch (enet_if_mode) {
        case PHY_INTERFACE_MODE_MII:
            break;
        case PHY_INTERFACE_MODE_RGMII:
            upsmr |= (UPSMR_RPM | UPSMR_R10M);
            break;
        case PHY_INTERFACE_MODE_RMII:
            upsmr |= (UPSMR_R10M | UPSMR_RMM);
            break;
        default:
            return -EINVAL;
        }
        break;
    case SPEED_100:
        maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
        switch (enet_if_mode) {
        case PHY_INTERFACE_MODE_MII:
            break;
        case PHY_INTERFACE_MODE_RGMII:
            upsmr |= UPSMR_RPM;
            break;
        case PHY_INTERFACE_MODE_RMII:
            upsmr |= UPSMR_RMM;
            break;
        default:
            return -EINVAL;
        }
        break;
    case SPEED_1000:
        maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
        switch (enet_if_mode) {
        case PHY_INTERFACE_MODE_GMII:
            break;
        case PHY_INTERFACE_MODE_TBI:
            upsmr |= UPSMR_TBIM;
            break;
        case PHY_INTERFACE_MODE_RTBI:
            upsmr |= (UPSMR_RPM | UPSMR_TBIM);
            break;
        case PHY_INTERFACE_MODE_RGMII_RXID:
        case PHY_INTERFACE_MODE_RGMII_TXID:
        case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII:
            upsmr |= UPSMR_RPM;
            break;
        case PHY_INTERFACE_MODE_SGMII:
            upsmr |= UPSMR_SGMM;
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -EINVAL;
    }

    out_be32(&uec_regs->maccfg2, maccfg2);
    out_be32(&uec->uccf->uf_regs->upsmr, upsmr);

    return 0;
}

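/*
 * eth_ops .start: query the PHY via the generic helpers, program the
 * MAC for the negotiated speed/interface and open both directions.
 * Returns -EINVAL if the link is not up.
 */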
static int qe_uec_start(struct udevice *dev)
{
    struct qe_uec_priv *priv = dev_get_priv(dev);
    struct uec_priv *uec = priv->uec;
    struct phy_device *phydev = priv->phydev;
    struct uec_inf *uec_info = uec->uec_info;
    int err;

    if (!phydev)
        return -ENODEV;

    /* Setup MAC interface mode */
    genphy_update_link(phydev);
    genphy_parse_link(phydev);
    uec_info->speed = phydev->speed;
    uec_set_mac_if_mode(uec);

    err = uec_open(uec, COMM_DIR_RX_AND_TX);
    if (err) {
        printf("%s: cannot enable UEC device\n", dev->name);
        return -EINVAL;
    }

    return (phydev->link ? 0 : -EINVAL);
}

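/*
 * eth_ops .send: busy-poll for a free TxBD, fill it in, kick the UCC
 * via "transmit on demand" and poll until the hardware clears the
 * READY bit. The 0x100000 loop bounds are iteration counts, not
 * wall-clock timeouts.
 */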
static int qe_uec_send(struct udevice *dev, void *packet, int length)
{
    struct qe_uec_priv *priv = dev_get_priv(dev);
    struct uec_priv *uec = priv->uec;
    struct ucc_fast_priv *uccf = uec->uccf;
    struct buffer_descriptor *bd;
    u16 status;
    int i;
    int result = 0;

    bd = uec->tx_bd;

    /* Find an empty TxBD */
    for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
        if (i > 0x100000) {
            printf("%s: tx buffer not ready\n", dev->name);
            return result;
        }
    }

    /* Init TxBD */
    BD_DATA_SET(bd, packet);
    BD_LENGTH_SET(bd, length);
    status = BD_STATUS(bd);
    status &= BD_WRAP;
    status |= (TX_BD_READY | TX_BD_LAST);
    BD_STATUS_SET(bd, status);

    /* Tell UCC to transmit the buffer */
    ucc_fast_transmit_on_demand(uccf);

    /* Wait for buffer to be transmitted */
    for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
        if (i > 0x100000) {
            printf("%s: tx error\n", dev->name);
            return result;
        }
    }

    /* OK, the buffer has been transmitted */
    BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
    uec->tx_bd = bd;
    result = 1;

    return result;
}

/*
 * Receive frame:
 * - wait for the next BD to get ready bit set
 * - clean up the descriptor
 * - move on and indicate to HW that the cleaned BD is available for Rx
 */
static int qe_uec_recv(struct udevice *dev, int flags, uchar **packetp)
{
    struct qe_uec_priv *priv = dev_get_priv(dev);
    struct uec_priv *uec = priv->uec;
    struct buffer_descriptor *bd;
    u16 status;
    u16 len = 0;
    u8 *data;

    *packetp = memalign(ARCH_DMA_MINALIGN, MAX_RXBUF_LEN);
    if (!*packetp) {
        printf("%s: error allocating packetp\n", __func__);
        return -ENOMEM;
    }

    bd = uec->rx_bd;
    status = BD_STATUS(bd);

    while (!(status & RX_BD_EMPTY)) {
        if (!(status & RX_BD_ERROR)) {
            data = BD_DATA(bd);
            len = BD_LENGTH(bd);
            memcpy(*packetp, (char *)data, len);
        } else {
            printf("%s: Rx error\n", dev->name);
        }
        status &= BD_CLEAN;
        BD_LENGTH_SET(bd, 0);
        BD_STATUS_SET(bd, status | RX_BD_EMPTY);
        BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
        status = BD_STATUS(bd);
    }
    uec->rx_bd = bd;

    return len;
}

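/*
 * Gracefully stop the Tx path: clear the UCCE_GRA event, issue the
 * QE_GRACEFUL_STOP_TX host command and busy-wait for the graceful-stop
 * event bit. The Rx variant below instead polls an acknowledge bit in
 * the Rx global parameter RAM, re-issuing the command until it is set.
 */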
static int uec_graceful_stop_tx(struct uec_priv *uec)
{
    ucc_fast_t *uf_regs;
    u32 cecr_subblock;
    u32 ucce;

    uf_regs = uec->uccf->uf_regs;

    /* Clear the grace stop event */
    out_be32(&uf_regs->ucce, UCCE_GRA);

    /* Issue host command */
    cecr_subblock =
        ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
    qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
                 (u8)QE_CR_PROTOCOL_ETHERNET, 0);

    /* Wait for command to complete */
    do {
        ucce = in_be32(&uf_regs->ucce);
    } while (!(ucce & UCCE_GRA));

    uec->grace_stopped_tx = 1;

    return 0;
}

static int uec_graceful_stop_rx(struct uec_priv *uec)
{
    u32 cecr_subblock;
    u8 ack;

    if (!uec->p_rx_glbl_pram) {
        printf("%s: Rx global parameter RAM not initialized\n", __func__);
        return -EINVAL;
    }

    /* Clear acknowledge bit */
    ack = uec->p_rx_glbl_pram->rxgstpack;
    ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
    uec->p_rx_glbl_pram->rxgstpack = ack;

    /* Keep issuing the command and checking the ack bit until it is asserted */
    do {
        /* Issue host command */
        cecr_subblock =
            ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
        qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
                     (u8)QE_CR_PROTOCOL_ETHERNET, 0);
        ack = uec->p_rx_glbl_pram->rxgstpack;
    } while (!(ack & GRACEFUL_STOP_ACKNOWLEDGE_RX));

    uec->grace_stopped_rx = 1;

    return 0;
}

static int uec_stop(struct uec_priv *uec, comm_dir_e mode)
{
    /* check if the UCC number is in range. */
    if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
        printf("%s: ucc_num out of range.\n", __func__);
        return -EINVAL;
    }

    /* Stop any transmissions */
    if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx)
        uec_graceful_stop_tx(uec);

    /* Stop any receptions */
    if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx)
        uec_graceful_stop_rx(uec);

    /* Disable the UCC fast */
    ucc_fast_disable(uec->uccf, mode);

    /* Disable the MAC */
    uec_mac_disable(uec, mode);

    return 0;
}

static void qe_uec_stop(struct udevice *dev)
{
    struct qe_uec_priv *priv = dev_get_priv(dev);
    struct uec_priv *uec = priv->uec;

    uec_stop(uec, COMM_DIR_RX_AND_TX);
}

static int qe_uec_set_hwaddr(struct udevice *dev)
{
    struct qe_uec_priv *priv = dev_get_priv(dev);
    struct eth_pdata *pdata = dev_get_plat(dev);
    struct uec_priv *uec = priv->uec;
    uec_t *uec_regs = uec->uec_regs;
    uchar *mac = pdata->enetaddr;
    u32 mac_addr1;
    u32 mac_addr2;

    /*
     * For a station address of 12:34:56:78:AB:CD, write
     * MACSTNADDR1 = 0xCDAB7856 and
     * MACSTNADDR2 = 0x34120000
     */
    mac_addr1 = (mac[5] << 24) | (mac[4] << 16) |
                (mac[3] << 8) | (mac[2]);
    out_be32(&uec_regs->macstnaddr1, mac_addr1);

    mac_addr2 = ((mac[1] << 24) | (mac[0] << 16)) & 0xffff0000;
    out_be32(&uec_regs->macstnaddr2, mac_addr2);

    return 0;
}

static int qe_uec_free_pkt(struct udevice *dev, uchar *packet, int length)
{
    if (packet)
        free(packet);

    return 0;
}

static const struct eth_ops qe_uec_eth_ops = {
    .start = qe_uec_start,
    .send = qe_uec_send,
    .recv = qe_uec_recv,
    .free_pkt = qe_uec_free_pkt,
    .stop = qe_uec_stop,
    .write_hwaddr = qe_uec_set_hwaddr,
};

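/* Map a UEC_NUM_OF_THREADS_* enum value to its plain integer count. */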
static int uec_convert_threads_num(enum uec_num_of_threads threads_num,
                                   int *threads_num_ret)
{
    int num_threads_numerical;

    switch (threads_num) {
    case UEC_NUM_OF_THREADS_1:
        num_threads_numerical = 1;
        break;
    case UEC_NUM_OF_THREADS_2:
        num_threads_numerical = 2;
        break;
    case UEC_NUM_OF_THREADS_4:
        num_threads_numerical = 4;
        break;
    case UEC_NUM_OF_THREADS_6:
        num_threads_numerical = 6;
        break;
    case UEC_NUM_OF_THREADS_8:
        num_threads_numerical = 8;
        break;
    default:
        printf("%s: Bad number of threads value.\n", __func__);
        return -EINVAL;
    }

    *threads_num_ret = num_threads_numerical;

    return 0;
}

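/*
 * Build the global Tx parameter RAM in MURAM: TEMODER, a single send
 * queue descriptor pointing at the TxBD ring, and the per-thread data
 * area (TQPTR). The scheduler and TxRMON statistics stay disabled since
 * only one Tx queue is used.
 */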
static void uec_init_tx_parameter(struct uec_priv *uec, int num_threads_tx)
{
    struct uec_inf *uec_info;
    u32 end_bd;
    u8 bmrx = 0;
    int i;

    uec_info = uec->uec_info;

    /* Allocate global Tx parameter RAM page */
    uec->tx_glbl_pram_offset =
        qe_muram_alloc(sizeof(struct uec_tx_global_pram),
                       UEC_TX_GLOBAL_PRAM_ALIGNMENT);
    uec->p_tx_glbl_pram = (struct uec_tx_global_pram *)
        qe_muram_addr(uec->tx_glbl_pram_offset);

    /* Zero the global Tx parameter RAM */
    memset(uec->p_tx_glbl_pram, 0, sizeof(struct uec_tx_global_pram));

    /* Init global Tx parameter RAM */

    /* TEMODER: RMON statistics disabled, one Tx queue */
    out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);

    /* SQPTR */
    uec->send_q_mem_reg_offset =
        qe_muram_alloc(sizeof(struct uec_send_queue_qd),
                       UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
    uec->p_send_q_mem_reg = (struct uec_send_queue_mem_region *)
        qe_muram_addr(uec->send_q_mem_reg_offset);
    out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);

    /* Set up the table with the TxBD ring */
    end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
             * SIZEOFBD;
    out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
             (u32)(uec->p_tx_bd_ring));
    out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
             end_bd);

    /* Scheduler base pointer: not needed, we have only one Tx queue */
    out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);

    /* TxRMON base pointer: not needed, TxRMON is disabled */
    out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);

    /* TSTATE: global snooping, big endian, the CSB bus selected */
    bmrx = BMR_INIT_VALUE;
    out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));

    /* IPH_Offset */
    for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++)
        out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);

    /* VTAG table */
    for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++)
        out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);

    /* TQPTR */
    uec->thread_dat_tx_offset =
        qe_muram_alloc(num_threads_tx *
                       sizeof(struct uec_thread_data_tx) +
                       32 * (num_threads_tx == 1),
                       UEC_THREAD_DATA_ALIGNMENT);
    uec->p_thread_data_tx = (struct uec_thread_data_tx *)
        qe_muram_addr(uec->thread_dat_tx_offset);
    out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
}

static void uec_init_rx_parameter(struct uec_priv *uec, int num_threads_rx)
{
    u8 bmrx = 0;
    int i;
    struct uec_82xx_add_filtering_pram *p_af_pram;

    /* Allocate global Rx parameter RAM page */
    uec->rx_glbl_pram_offset =
        qe_muram_alloc(sizeof(struct uec_rx_global_pram),
                       UEC_RX_GLOBAL_PRAM_ALIGNMENT);
    uec->p_rx_glbl_pram = (struct uec_rx_global_pram *)
        qe_muram_addr(uec->rx_glbl_pram_offset);

    /* Zero the global Rx parameter RAM */
    memset(uec->p_rx_glbl_pram, 0, sizeof(struct uec_rx_global_pram));

    /* Init global Rx parameter RAM */

    /*
     * REMODER: extended feature mode disabled, VLAN disabled,
     * lossless flow control disabled, receive firmware statistics
     * disabled, extended address parsing mode disabled, one Rx queue,
     * dynamic maximum/minimum frame length disabled, IP checksum
     * check disabled, IP address alignment disabled
     */
    out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);

    /* RQPTR */
    uec->thread_dat_rx_offset =
        qe_muram_alloc(num_threads_rx *
                       sizeof(struct uec_thread_data_rx),
                       UEC_THREAD_DATA_ALIGNMENT);
    uec->p_thread_data_rx = (struct uec_thread_data_rx *)
        qe_muram_addr(uec->thread_dat_rx_offset);
    out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);

    /* Type_or_Len */
    out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);

    /* RxRMON base pointer: not needed */
    out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);

    /* IntCoalescingPTR: not needed, no interrupts used */
    out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);

    /* RSTATE: global snooping, big endian, the CSB bus selected */
    bmrx = BMR_INIT_VALUE;
    out_8(&uec->p_rx_glbl_pram->rstate, bmrx);

    /* MRBLR */
    out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);

    /* RBDQPTR */
    uec->rx_bd_qs_tbl_offset =
        qe_muram_alloc(sizeof(struct uec_rx_bd_queues_entry) +
                       sizeof(struct uec_rx_pref_bds),
                       UEC_RX_BD_QUEUES_ALIGNMENT);
    uec->p_rx_bd_qs_tbl = (struct uec_rx_bd_queues_entry *)
        qe_muram_addr(uec->rx_bd_qs_tbl_offset);

    /* Zero it */
    memset(uec->p_rx_bd_qs_tbl, 0, sizeof(struct uec_rx_bd_queues_entry) +
           sizeof(struct uec_rx_pref_bds));
    out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
    out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
             (u32)uec->p_rx_bd_ring);

    /* MFLR */
    out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
    /* MINFLR */
    out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
    /* MAXD1 */
    out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
    /* MAXD2 */
    out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
    /* ECAM_PTR */
    out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
    /* L2QT */
    out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
    /* L3QT */
    for (i = 0; i < 8; i++)
        out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);

    /* VLAN_TYPE */
    out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
    /* TCI */
    out_be16(&uec->p_rx_glbl_pram->vlantci, 0);

    /* Clear the PQ2-style address filtering hash table */
    p_af_pram = (struct uec_82xx_add_filtering_pram *)
        uec->p_rx_glbl_pram->addressfiltering;
    p_af_pram->iaddr_h = 0;
    p_af_pram->iaddr_l = 0;
    p_af_pram->gaddr_h = 0;
    p_af_pram->gaddr_l = 0;
}

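/*
 * Assemble the INIT_ENET command parameter page: the magic "resinit"
 * words, one snum plus thread-parameter-RAM entry per Rx/Tx thread and
 * the global parameter RAM pointers, then issue QE_INIT_TX_RX through
 * the CECR.
 */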
static int uec_issue_init_enet_rxtx_cmd(struct uec_priv *uec,
                                        int thread_tx, int thread_rx)
{
    struct uec_init_cmd_pram *p_init_enet_param;
    u32 init_enet_param_offset;
    struct uec_inf *uec_info;
    struct ucc_fast_inf *uf_info;
    int i;
    int snum;
    u32 off;
    u32 entry_val;
    u32 command;
    u32 cecr_subblock;

    uec_info = uec->uec_info;
    uf_info = &uec_info->uf_info;

    /* Allocate init enet command parameter */
    uec->init_enet_param_offset =
        qe_muram_alloc(sizeof(struct uec_init_cmd_pram), 4);
    init_enet_param_offset = uec->init_enet_param_offset;
    uec->p_init_enet_param = (struct uec_init_cmd_pram *)
        qe_muram_addr(uec->init_enet_param_offset);

    /* Zero the init enet command struct */
    memset((void *)uec->p_init_enet_param, 0,
           sizeof(struct uec_init_cmd_pram));

    /* Init the command struct */
    p_init_enet_param = uec->p_init_enet_param;
    p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
    p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
    p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
    p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
    p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;

    p_init_enet_param->largestexternallookupkeysize = 0;

    p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
                                         << ENET_INIT_PARAM_RGF_SHIFT;
    p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
                                         << ENET_INIT_PARAM_TGF_SHIFT;

    /* Init Rx global parameter pointer */
    p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
                                         (u32)uec_info->risc_rx;

    /* Init Rx threads */
    for (i = 0; i < (thread_rx + 1); i++) {
        snum = qe_get_snum();
        if (snum < 0) {
            printf("%s: cannot get snum\n", __func__);
            return -ENOMEM;
        }

        if (i == 0) {
            off = 0;
        } else {
            off = qe_muram_alloc(sizeof(struct uec_thread_rx_pram),
                                 UEC_THREAD_RX_PRAM_ALIGNMENT);
        }

        entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
                    off | (u32)uec_info->risc_rx;
        p_init_enet_param->rxthread[i] = entry_val;
    }

    /* Init Tx global parameter pointer */
    p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
                                  (u32)uec_info->risc_tx;

    /* Init Tx threads */
    for (i = 0; i < thread_tx; i++) {
        snum = qe_get_snum();
        if (snum < 0) {
            printf("%s: cannot get snum\n", __func__);
            return -ENOMEM;
        }

        off = qe_muram_alloc(sizeof(struct uec_thread_tx_pram),
                             UEC_THREAD_TX_PRAM_ALIGNMENT);

        entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
                    off | (u32)uec_info->risc_tx;
        p_init_enet_param->txthread[i] = entry_val;
    }

    __asm__ __volatile__("sync");

    /* Issue QE command */
    command = QE_INIT_TX_RX;
    cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
    qe_issue_cmd(command, cecr_subblock, (u8)QE_CR_PROTOCOL_ETHERNET,
                 init_enet_param_offset);

    return 0;
}

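/*
 * One-time hardware bring-up: validate the BD ring geometry, initialize
 * the UCC fast controller and the MAC registers, allocate and zero the
 * BD rings and Rx buffers, fill the parameter RAM and finally issue the
 * init-enet command.
 */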
static int uec_startup(struct udevice *dev)
{
    struct qe_uec_priv *priv = dev_get_priv(dev);
    struct uec_priv *uec = priv->uec;
    struct uec_inf *uec_info;
    struct ucc_fast_inf *uf_info;
    struct ucc_fast_priv *uccf;
    ucc_fast_t *uf_regs;
    uec_t *uec_regs;
    int num_threads_tx;
    int num_threads_rx;
    u32 utbipar;
    u32 length;
    u32 align;
    struct buffer_descriptor *bd;
    u8 *buf;
    int i;

    uec_info = uec->uec_info;
    uf_info = &uec_info->uf_info;

    /* Check if the Rx BD ring length is valid */
    if (uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN ||
        uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT) {
        printf("%s: Rx BD ring length must be a multiple of 4 and at least 8.\n",
               __func__);
        return -EINVAL;
    }

    /* Check if the Tx BD ring length is valid */
    if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
        printf("%s: Tx BD ring length must not be smaller than 2.\n",
               __func__);
        return -EINVAL;
    }

    /* Check if MRBLR is valid */
    if (MAX_RXBUF_LEN == 0 || (MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT)) {
        printf("%s: max Rx buffer length must be a multiple of 128.\n",
               __func__);
        return -EINVAL;
    }

    /* Both Rx and Tx are stopped */
    uec->grace_stopped_rx = 1;
    uec->grace_stopped_tx = 1;

    /* Init UCC fast */
    if (ucc_fast_init(uf_info, &uccf)) {
        printf("%s: failed to init ucc fast\n", __func__);
        return -ENOMEM;
    }

    /* Save uccf */
    uec->uccf = uccf;

    /* Convert the Tx threads number */
    if (uec_convert_threads_num(uec_info->num_threads_tx,
                                &num_threads_tx))
        return -EINVAL;

    /* Convert the Rx threads number */
    if (uec_convert_threads_num(uec_info->num_threads_rx,
                                &num_threads_rx))
        return -EINVAL;

    uf_regs = uccf->uf_regs;

    /* The UEC registers follow the UCC fast registers */
    uec_regs = (uec_t *)(&uf_regs->ucc_eth);

    /* Save the UEC register pointer to the UEC private struct */
    uec->uec_regs = uec_regs;

    /* Init UPSMR: enable hardware statistics (UCC) */
    out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);

    /* Init MACCFG1: flow control disabled, Tx and Rx disabled */
    out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);

    /* Init MACCFG2: length check, MAC PAD and CRC enabled */
    out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);

    /* Setup UTBIPAR */
    utbipar = in_be32(&uec_regs->utbipar);
    utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;

    /*
     * Initialize UTBIPAR address to CONFIG_UTBIPAR_INIT_TBIPA for ALL UEC.
     * This frees up the remaining SMI addresses for use.
     */
    utbipar |= CONFIG_UTBIPAR_INIT_TBIPA << UTBIPAR_PHY_ADDRESS_SHIFT;
    out_be32(&uec_regs->utbipar, utbipar);

    /* Allocate Tx BDs */
    length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
              UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
             UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
    if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
        UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
        length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;

    align = UEC_TX_BD_RING_ALIGNMENT;
    uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
    if (uec->tx_bd_ring_offset != 0)
        uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
                                   & ~(align - 1));

    /* Zero all of the Tx BDs */
    memset((void *)(uec->tx_bd_ring_offset), 0, length + align);

    /* Allocate Rx BDs */
    length = uec_info->rx_bd_ring_len * SIZEOFBD;
    align = UEC_RX_BD_RING_ALIGNMENT;
    uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
    if (uec->rx_bd_ring_offset != 0)
        uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
                                   & ~(align - 1));

    /* Zero all of the Rx BDs */
    memset((void *)(uec->rx_bd_ring_offset), 0, length + align);

    /* Allocate Rx buffers */
    length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
    align = UEC_RX_DATA_BUF_ALIGNMENT;
    uec->rx_buf_offset = (u32)malloc(length + align);
    if (uec->rx_buf_offset != 0)
        uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
                               & ~(align - 1));

    /* Zero all of the Rx buffers */
    memset((void *)(uec->rx_buf_offset), 0, length + align);

    /* Init the TxBD ring */
    bd = (struct buffer_descriptor *)uec->p_tx_bd_ring;
    uec->tx_bd = bd;
    for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
        BD_DATA_CLEAR(bd);
        BD_STATUS_SET(bd, 0);
        BD_LENGTH_SET(bd, 0);
        bd++;
    }
    BD_STATUS_SET((--bd), TX_BD_WRAP);

    /* Init the RxBD ring */
    bd = (struct buffer_descriptor *)uec->p_rx_bd_ring;
    uec->rx_bd = bd;
    buf = uec->p_rx_buf;
    for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
        BD_DATA_SET(bd, buf);
        BD_LENGTH_SET(bd, 0);
        BD_STATUS_SET(bd, RX_BD_EMPTY);
        buf += MAX_RXBUF_LEN;
        bd++;
    }
    BD_STATUS_SET((--bd), RX_BD_WRAP | RX_BD_EMPTY);

    /* Init the global Tx parameter RAM */
    uec_init_tx_parameter(uec, num_threads_tx);

    /* Init the global Rx parameter RAM */
    uec_init_rx_parameter(uec, num_threads_rx);

    /* Init ethernet Tx and Rx parameter command */
    if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
                                     num_threads_rx)) {
        printf("%s issue init enet cmd failed\n", __func__);
        return -ENOMEM;
    }

    return 0;
}

/*
 * Convert a string to a QE clock source
 *
 * This function takes a string, typically from a property in the device
 * tree, and returns the corresponding "enum qe_clock" value.
 */
enum qe_clock qe_clock_source(const char *source)
{
    unsigned int i;

    if (strcasecmp(source, "none") == 0)
        return QE_CLK_NONE;

    if (strncasecmp(source, "brg", 3) == 0) {
        i = dectoul(source + 3, NULL);
        if (i >= 1 && i <= 16)
            return (QE_BRG1 - 1) + i;
        else
            return QE_CLK_DUMMY;
    }

    if (strncasecmp(source, "clk", 3) == 0) {
        i = dectoul(source + 3, NULL);
        if (i >= 1 && i <= 24)
            return (QE_CLK1 - 1) + i;
        else
            return QE_CLK_DUMMY;
    }

    return QE_CLK_DUMMY;
}

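/*
 * Gigabit-capable interface modes (GMII/RGMII/TBI/RTBI/SGMII) select
 * GIGA_ETH for the UCC fast setup; everything else is FAST_ETH.
 */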
static void qe_uec_set_eth_type(struct udevice *dev)
{
    struct qe_uec_priv *priv = dev_get_priv(dev);
    struct uec_priv *uec = priv->uec;
    struct uec_inf *uec_info = uec->uec_info;
    struct ucc_fast_inf *uf_info = &uec_info->uf_info;

    switch (uec_info->enet_interface_type) {
    case PHY_INTERFACE_MODE_GMII:
    case PHY_INTERFACE_MODE_RGMII:
    case PHY_INTERFACE_MODE_RGMII_ID:
    case PHY_INTERFACE_MODE_RGMII_RXID:
    case PHY_INTERFACE_MODE_RGMII_TXID:
    case PHY_INTERFACE_MODE_TBI:
    case PHY_INTERFACE_MODE_RTBI:
    case PHY_INTERFACE_MODE_SGMII:
        uf_info->eth_type = GIGA_ETH;
        break;
    default:
        uf_info->eth_type = FAST_ETH;
        break;
    }
}

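/*
 * Fill struct uec_inf from the device tree: the UCC number comes from
 * "cell-index" (or the legacy "device-id"), the clocks from
 * "rx-/tx-clock-name" (or the legacy numeric "rx-/tx-clock"). Thread
 * counts, RISC allocation and BD ring lengths are fixed defaults.
 */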
static int qe_uec_set_uec_info(struct udevice *dev)
{
    struct qe_uec_priv *priv = dev_get_priv(dev);
    struct eth_pdata *pdata = dev_get_plat(dev);
    struct uec_priv *uec = priv->uec;
    struct uec_inf *uec_info;
    struct ucc_fast_inf *uf_info;
    const char *s;
    int ret;
    u32 val;

    uec_info = (struct uec_inf *)malloc(sizeof(struct uec_inf));
    if (!uec_info)
        return -ENOMEM;

    uf_info = &uec_info->uf_info;

    ret = dev_read_u32(dev, "cell-index", &val);
    if (ret) {
        ret = dev_read_u32(dev, "device-id", &val);
        if (ret) {
            pr_err("neither cell-index nor device-id found!");
            goto out;
        }
    }

    uf_info->ucc_num = val - 1;
    if (uf_info->ucc_num < 0 || uf_info->ucc_num > 7) {
        ret = -ENODEV;
        goto out;
    }

    ret = dev_read_string_index(dev, "rx-clock-name", 0, &s);
    if (!ret) {
        uf_info->rx_clock = qe_clock_source(s);
        if (uf_info->rx_clock < QE_CLK_NONE ||
            uf_info->rx_clock > QE_CLK24) {
            pr_err("invalid rx-clock-name property\n");
            ret = -EINVAL;
            goto out;
        }
    } else {
        ret = dev_read_u32(dev, "rx-clock", &val);
        if (ret) {
            /*
             * If both rx-clock-name and rx-clock are missing,
             * we want to tell people to use rx-clock-name.
             */
            pr_err("missing rx-clock-name property\n");
            goto out;
        }
        if (val < QE_CLK_NONE || val > QE_CLK24) {
            pr_err("invalid rx-clock property\n");
            ret = -EINVAL;
            goto out;
        }
        uf_info->rx_clock = val;
    }

    ret = dev_read_string_index(dev, "tx-clock-name", 0, &s);
    if (!ret) {
        uf_info->tx_clock = qe_clock_source(s);
        if (uf_info->tx_clock < QE_CLK_NONE ||
            uf_info->tx_clock > QE_CLK24) {
            pr_err("invalid tx-clock-name property\n");
            ret = -EINVAL;
            goto out;
        }
    } else {
        ret = dev_read_u32(dev, "tx-clock", &val);
        if (ret) {
            pr_err("missing tx-clock-name property\n");
            goto out;
        }
        if (val < QE_CLK_NONE || val > QE_CLK24) {
            pr_err("invalid tx-clock property\n");
            ret = -EINVAL;
            goto out;
        }
        uf_info->tx_clock = val;
    }

    uec_info->num_threads_tx = UEC_NUM_OF_THREADS_1;
    uec_info->num_threads_rx = UEC_NUM_OF_THREADS_1;
    uec_info->risc_tx = QE_RISC_ALLOCATION_RISC1_AND_RISC2;
    uec_info->risc_rx = QE_RISC_ALLOCATION_RISC1_AND_RISC2;
    uec_info->tx_bd_ring_len = 16;
    uec_info->rx_bd_ring_len = 16;
#if (MAX_QE_RISC == 4)
    uec_info->risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS;
    uec_info->risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS;
#endif

    uec_info->enet_interface_type = pdata->phy_interface;

    uec->uec_info = uec_info;
    qe_uec_set_eth_type(dev);

    return 0;
out:
    free(uec_info);
    return ret;
}

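/* Allocate the driver state, parse the DT, bring up the hardware and attach the PHY. */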
static int qe_uec_probe(struct udevice *dev)
{
    struct qe_uec_priv *priv = dev_get_priv(dev);
    struct eth_pdata *pdata = dev_get_plat(dev);
    struct uec_priv *uec;
    int ret;

    /* Allocate the UEC private struct */
    uec = (struct uec_priv *)malloc(sizeof(struct uec_priv));
    if (!uec)
        return -ENOMEM;

    memset(uec, 0, sizeof(struct uec_priv));
    priv->uec = uec;
    uec->uec_regs = (uec_t *)pdata->iobase;

    /* setup the uec info struct */
    ret = qe_uec_set_uec_info(dev);
    if (ret) {
        free(uec);
        return ret;
    }

    ret = uec_startup(dev);
    if (ret) {
        free(uec->uec_info);
        free(uec);
        return ret;
    }

    priv->phydev = dm_eth_phy_connect(dev);
    return 0;
}

/*
 * Remove the driver from an interface:
 * - free up allocated memory
 */
static int qe_uec_remove(struct udevice *dev)
{
    struct qe_uec_priv *priv = dev_get_priv(dev);

    free(priv->uec);
    return 0;
}

static int qe_uec_of_to_plat(struct udevice *dev)
{
    struct eth_pdata *pdata = dev_get_plat(dev);
    const char *phy_mode;

    pdata->iobase = (phys_addr_t)devfdt_get_addr(dev);

    pdata->phy_interface = -1;
    phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev),
                           "phy-connection-type", NULL);
    if (phy_mode)
        pdata->phy_interface = phy_get_interface_by_name(phy_mode);
    if (pdata->phy_interface == -1) {
        debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
        return -EINVAL;
    }

    return 0;
}

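/*
 * A minimal sketch of a matching device tree node; names and values are
 * illustrative only, showing just the properties this driver reads:
 *
 *    ucc@2000 {
 *        compatible = "ucc_geth";
 *        cell-index = <1>;
 *        rx-clock-name = "none";
 *        tx-clock-name = "clk12";
 *        phy-connection-type = "rgmii-id";
 *    };
 */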
static const struct udevice_id qe_uec_ids[] = {
    { .compatible = QE_UEC_DRIVER_NAME },
    { }
};

U_BOOT_DRIVER(eth_qe_uec) = {
    .name = QE_UEC_DRIVER_NAME,
    .id = UCLASS_ETH,
    .of_match = qe_uec_ids,
    .of_to_plat = qe_uec_of_to_plat,
    .probe = qe_uec_probe,
    .remove = qe_uec_remove,
    .ops = &qe_uec_eth_ops,
    .priv_auto = sizeof(struct qe_uec_priv),
    .plat_auto = sizeof(struct eth_pdata),
};