dm_qe_uec.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * QE UEC ethernet controller driver
 *
 * based on drivers/qe/uec.c from NXP
 *
 * Copyright (C) 2020 Heiko Schocher <hs@denx.de>
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <miiphy.h>
#include <asm/io.h>

#include "dm_qe_uec.h"

#define QE_UEC_DRIVER_NAME "ucc_geth"

/* Default UTBIPAR SMI address */
#ifndef CONFIG_UTBIPAR_INIT_TBIPA
#define CONFIG_UTBIPAR_INIT_TBIPA 0x1F
#endif

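/*
 * Enable/disable the MAC transmitter and receiver with a
 * read-modify-write of MACCFG1; the mac_tx_enabled/mac_rx_enabled
 * flags mirror the current hardware state in software.
 */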
static int uec_mac_enable(struct uec_priv *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	uec_regs = uec->uec_regs;
	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 |= MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 1;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 |= MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 1;
	}

	return 0;
}

static int uec_mac_disable(struct uec_priv *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	uec_regs = uec->uec_regs;
	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 &= ~MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 0;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 &= ~MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 0;
	}

	return 0;
}

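/*
 * The QE_RESTART_TX/QE_RESTART_RX host commands restart the Tx/Rx
 * RISC microcode for this UCC after a graceful stop; the subblock
 * code derived from ucc_num selects which UCC the command targets.
 */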
static int uec_restart_tx(struct uec_priv *uec)
{
	struct uec_inf *ui = uec->uec_info;
	u32 cecr_subblock;

	cecr_subblock = ucc_fast_get_qe_cr_subblock(ui->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);
	uec->grace_stopped_tx = 0;

	return 0;
}

static int uec_restart_rx(struct uec_priv *uec)
{
	struct uec_inf *ui = uec->uec_info;
	u32 cecr_subblock;

	cecr_subblock = ucc_fast_get_qe_cr_subblock(ui->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);
	uec->grace_stopped_rx = 0;

	return 0;
}

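/*
 * Bring-up order: enable the MAC first, then the UCC fast controller,
 * and finally restart the Tx/Rx microcode if it had been gracefully
 * stopped earlier.
 */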
static int uec_open(struct uec_priv *uec, comm_dir_e mode)
{
	struct ucc_fast_priv *uccf;

	uccf = uec->uccf;

	/* check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __func__);
		return -EINVAL;
	}

	/* Enable MAC */
	uec_mac_enable(uec, mode);

	/* Enable UCC fast */
	ucc_fast_enable(uccf, mode);

	/* RISC microcode start */
	if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx)
		uec_restart_tx(uec);
	if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx)
		uec_restart_rx(uec);

	return 0;
}

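/*
 * Program MACCFG2 and UPSMR from the negotiated speed and PHY
 * interface type: nibble mode for 10/100 (MII/RMII/RGMII), byte mode
 * for 1000 (GMII/TBI/RTBI/RGMII/SGMII); the UPSMR_RPM, UPSMR_R10M,
 * UPSMR_TBIM, UPSMR_RMM and UPSMR_SGMM bits select the reduced-pin,
 * 10 Mbps, TBI, RMII and SGMII variants respectively.
 */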
static int uec_set_mac_if_mode(struct uec_priv *uec)
{
	struct uec_inf *uec_info = uec->uec_info;
	phy_interface_t enet_if_mode;
	uec_t *uec_regs;
	u32 upsmr;
	u32 maccfg2;

	uec_regs = uec->uec_regs;
	enet_if_mode = uec_info->enet_interface_type;

	maccfg2 = in_be32(&uec_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;

	upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);

	switch (uec_info->speed) {
	case SPEED_10:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_MII:
			break;
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= (UPSMR_RPM | UPSMR_R10M);
			break;
		case PHY_INTERFACE_MODE_RMII:
			upsmr |= (UPSMR_R10M | UPSMR_RMM);
			break;
		default:
			return -EINVAL;
		}
		break;
	case SPEED_100:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_MII:
			break;
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= UPSMR_RPM;
			break;
		case PHY_INTERFACE_MODE_RMII:
			upsmr |= UPSMR_RMM;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SPEED_1000:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_GMII:
			break;
		case PHY_INTERFACE_MODE_TBI:
			upsmr |= UPSMR_TBIM;
			break;
		case PHY_INTERFACE_MODE_RTBI:
			upsmr |= (UPSMR_RPM | UPSMR_TBIM);
			break;
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= UPSMR_RPM;
			break;
		case PHY_INTERFACE_MODE_SGMII:
			upsmr |= UPSMR_SGMM;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	out_be32(&uec_regs->maccfg2, maccfg2);
	out_be32(&uec->uccf->uf_regs->upsmr, upsmr);

	return 0;
}

static int qe_uec_start(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct uec_priv *uec = priv->uec;
	struct phy_device *phydev = priv->phydev;
	struct uec_inf *uec_info = uec->uec_info;
	int err;

	if (!phydev)
		return -ENODEV;

	/* Setup MAC interface mode */
	genphy_update_link(phydev);
	genphy_parse_link(phydev);
	uec_info->speed = phydev->speed;
	uec_set_mac_if_mode(uec);

	err = uec_open(uec, COMM_DIR_RX_AND_TX);
	if (err) {
		printf("%s: cannot enable UEC device\n", dev->name);
		return -EINVAL;
	}

	return (phydev->link ? 0 : -EINVAL);
}

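/*
 * Transmit one frame through the next TxBD:
 * - busy-wait until the BD is no longer READY
 * - attach the buffer, set READY | LAST, keep the WRAP bit
 * - kick the UCC with transmit-on-demand and poll until the hardware
 *   clears READY, then advance the ring pointer
 * The 0x100000 iteration limits are arbitrary software timeouts.
 */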
static int qe_uec_send(struct udevice *dev, void *packet, int length)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct uec_priv *uec = priv->uec;
	struct ucc_fast_priv *uccf = uec->uccf;
	struct buffer_descriptor *bd;
	u16 status;
	int i;
	int result = 0;

	bd = uec->tx_bd;

	/* Find an empty TxBD */
	for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx buffer not ready\n", dev->name);
			return result;
		}
	}

	/* Init TxBD */
	BD_DATA_SET(bd, packet);
	BD_LENGTH_SET(bd, length);
	status = BD_STATUS(bd);
	status &= BD_WRAP;
	status |= (TX_BD_READY | TX_BD_LAST);
	BD_STATUS_SET(bd, status);

	/* Tell UCC to transmit the buffer */
	ucc_fast_transmit_on_demand(uccf);

	/* Wait for buffer to be transmitted */
	for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx error\n", dev->name);
			return result;
		}
	}

	/* OK, the buffer has been transmitted */
	BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
	uec->tx_bd = bd;
	result = 1;

	return result;
}

/*
 * Receive frame:
 * - wait for the next BD to get ready bit set
 * - clean up the descriptor
 * - move on and indicate to HW that the cleaned BD is available for Rx
 */
static int qe_uec_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct uec_priv *uec = priv->uec;
	struct buffer_descriptor *bd;
	u16 status;
	u16 len = 0;
	u8 *data;

	*packetp = memalign(ARCH_DMA_MINALIGN, MAX_RXBUF_LEN);
	if (*packetp == 0) {
		printf("%s: error allocating packetp\n", __func__);
		return -ENOMEM;
	}

	bd = uec->rx_bd;
	status = BD_STATUS(bd);

	while (!(status & RX_BD_EMPTY)) {
		if (!(status & RX_BD_ERROR)) {
			data = BD_DATA(bd);
			len = BD_LENGTH(bd);
			memcpy(*packetp, (char *)data, len);
		} else {
			printf("%s: Rx error\n", dev->name);
		}
		status &= BD_CLEAN;
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, status | RX_BD_EMPTY);
		BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
		status = BD_STATUS(bd);
	}
	uec->rx_bd = bd;

	return len;
}

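/*
 * Graceful stop handshake: for Tx, clear the GRA event in UCCE, issue
 * QE_GRACEFUL_STOP_TX and poll until the QE raises GRA again; for Rx
 * the acknowledge bit lives in the Rx global parameter RAM
 * (rxgstpack), so the command is reissued until that bit is set.
 */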
static int uec_graceful_stop_tx(struct uec_priv *uec)
{
	ucc_fast_t *uf_regs;
	u32 cecr_subblock;
	u32 ucce;

	uf_regs = uec->uccf->uf_regs;

	/* Clear the grace stop event */
	out_be32(&uf_regs->ucce, UCCE_GRA);

	/* Issue host command */
	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for command to complete */
	do {
		ucce = in_be32(&uf_regs->ucce);
	} while (!(ucce & UCCE_GRA));

	uec->grace_stopped_tx = 1;

	return 0;
}

static int uec_graceful_stop_rx(struct uec_priv *uec)
{
	u32 cecr_subblock;
	u8 ack;

	if (!uec->p_rx_glbl_pram) {
		printf("%s: No init rx global parameter\n", __func__);
		return -EINVAL;
	}

	/* Clear acknowledge bit */
	ack = uec->p_rx_glbl_pram->rxgstpack;
	ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	uec->p_rx_glbl_pram->rxgstpack = ack;

	/* Keep issuing cmd and checking ack bit until it is asserted */
	do {
		/* Issue host command */
		cecr_subblock =
			ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_ETHERNET, 0);
		ack = uec->p_rx_glbl_pram->rxgstpack;
	} while (!(ack & GRACEFUL_STOP_ACKNOWLEDGE_RX));

	uec->grace_stopped_rx = 1;

	return 0;
}

static int uec_stop(struct uec_priv *uec, comm_dir_e mode)
{
	/* check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __func__);
		return -EINVAL;
	}

	/* Stop any transmissions */
	if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx)
		uec_graceful_stop_tx(uec);

	/* Stop any receptions */
	if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx)
		uec_graceful_stop_rx(uec);

	/* Disable the UCC fast */
	ucc_fast_disable(uec->uccf, mode);

	/* Disable the MAC */
	uec_mac_disable(uec, mode);

	return 0;
}

static void qe_uec_stop(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct uec_priv *uec = priv->uec;

	uec_stop(uec, COMM_DIR_RX_AND_TX);
}

static int qe_uec_set_hwaddr(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct uec_priv *uec = priv->uec;
	uec_t *uec_regs = uec->uec_regs;
	uchar *mac = pdata->enetaddr;
	u32 mac_addr1;
	u32 mac_addr2;

	/*
	 * For a station address of 0x12345678ABCD, write 0xCDAB7856 to
	 * MACSTNADDR1 and 0x34120000 to MACSTNADDR2
	 */
	mac_addr1 = (mac[5] << 24) | (mac[4] << 16) |
		    (mac[3] << 8)  | (mac[2]);
	out_be32(&uec_regs->macstnaddr1, mac_addr1);

	mac_addr2 = ((mac[1] << 24) | (mac[0] << 16)) & 0xffff0000;
	out_be32(&uec_regs->macstnaddr2, mac_addr2);

	return 0;
}

static int qe_uec_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	if (packet)
		free(packet);

	return 0;
}

static const struct eth_ops qe_uec_eth_ops = {
	.start = qe_uec_start,
	.send = qe_uec_send,
	.recv = qe_uec_recv,
	.free_pkt = qe_uec_free_pkt,
	.stop = qe_uec_stop,
	.write_hwaddr = qe_uec_set_hwaddr,
};

static int uec_convert_threads_num(enum uec_num_of_threads threads_num,
				   int *threads_num_ret)
{
	int num_threads_numerical;

	switch (threads_num) {
	case UEC_NUM_OF_THREADS_1:
		num_threads_numerical = 1;
		break;
	case UEC_NUM_OF_THREADS_2:
		num_threads_numerical = 2;
		break;
	case UEC_NUM_OF_THREADS_4:
		num_threads_numerical = 4;
		break;
	case UEC_NUM_OF_THREADS_6:
		num_threads_numerical = 6;
		break;
	case UEC_NUM_OF_THREADS_8:
		num_threads_numerical = 8;
		break;
	default:
		printf("%s: Bad number of threads value.\n",
		       __func__);
		return -EINVAL;
	}

	*threads_num_ret = num_threads_numerical;

	return 0;
}

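/*
 * The parameter pages, queue descriptors and thread data areas set up
 * below live in the QE's multi-user RAM (MURAM): qe_muram_alloc()
 * hands back an offset into that RAM and qe_muram_addr() converts the
 * offset into a CPU-visible pointer.
 */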
static void uec_init_tx_parameter(struct uec_priv *uec, int num_threads_tx)
{
	struct uec_inf *uec_info;
	u32 end_bd;
	u8 bmrx = 0;
	int i;

	uec_info = uec->uec_info;

	/* Alloc global Tx parameter RAM page */
	uec->tx_glbl_pram_offset =
		qe_muram_alloc(sizeof(struct uec_tx_global_pram),
			       UEC_TX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_tx_glbl_pram = (struct uec_tx_global_pram *)
		qe_muram_addr(uec->tx_glbl_pram_offset);

	/* Zero the global Tx parameter RAM */
	memset(uec->p_tx_glbl_pram, 0, sizeof(struct uec_tx_global_pram));

	/* Init global Tx parameter RAM */

	/* TEMODER, RMON statistics disable, one Tx queue */
	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);

	/* SQPTR */
	uec->send_q_mem_reg_offset =
		qe_muram_alloc(sizeof(struct uec_send_queue_qd),
			       UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	uec->p_send_q_mem_reg = (struct uec_send_queue_mem_region *)
		qe_muram_addr(uec->send_q_mem_reg_offset);
	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);

	/* Setup the table with TxBDs ring */
	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
		 * SIZEOFBD;
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
		 (u32)(uec->p_tx_bd_ring));
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
		 end_bd);

	/* Scheduler Base Pointer, not needed: we have only one Tx queue */
	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);

	/* TxRMON Base Pointer, not needed: TxRMON is disabled */
	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);

	/* TSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));

	/* IPH_Offset */
	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++)
		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);

	/* VTAG table */
	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++)
		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);

	/* TQPTR */
	uec->thread_dat_tx_offset =
		qe_muram_alloc(num_threads_tx *
			       sizeof(struct uec_thread_data_tx) +
			       32 * (num_threads_tx == 1),
			       UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_tx = (struct uec_thread_data_tx *)
		qe_muram_addr(uec->thread_dat_tx_offset);
	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
}

static void uec_init_rx_parameter(struct uec_priv *uec, int num_threads_rx)
{
	u8 bmrx = 0;
	int i;
	struct uec_82xx_add_filtering_pram *p_af_pram;

	/* Allocate global Rx parameter RAM page */
	uec->rx_glbl_pram_offset =
		qe_muram_alloc(sizeof(struct uec_rx_global_pram),
			       UEC_RX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_rx_glbl_pram = (struct uec_rx_global_pram *)
		qe_muram_addr(uec->rx_glbl_pram_offset);

	/* Zero global Rx parameter RAM */
	memset(uec->p_rx_glbl_pram, 0, sizeof(struct uec_rx_global_pram));

	/* Init global Rx parameter RAM */

	/*
	 * REMODER, extended feature mode disabled, VLAN disabled,
	 * lossless flow control disabled, receive firmware statistics
	 * disabled, extended address parsing mode disabled, one Rx queue,
	 * dynamic maximum/minimum frame length disabled, IP checksum
	 * check disabled, IP address alignment disabled
	 */
	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);

	/* RQPTR */
	uec->thread_dat_rx_offset =
		qe_muram_alloc(num_threads_rx *
			       sizeof(struct uec_thread_data_rx),
			       UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_rx = (struct uec_thread_data_rx *)
		qe_muram_addr(uec->thread_dat_rx_offset);
	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);

	/* Type_or_Len */
	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);

	/* RxRMON base pointer, we don't need it */
	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);

	/* IntCoalescingPTR, we don't need it, no interrupt */
	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);

	/* RSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);

	/* MRBLR */
	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);

	/* RBDQPTR */
	uec->rx_bd_qs_tbl_offset =
		qe_muram_alloc(sizeof(struct uec_rx_bd_queues_entry) +
			       sizeof(struct uec_rx_pref_bds),
			       UEC_RX_BD_QUEUES_ALIGNMENT);
	uec->p_rx_bd_qs_tbl = (struct uec_rx_bd_queues_entry *)
		qe_muram_addr(uec->rx_bd_qs_tbl_offset);

	/* Zero it */
	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(struct uec_rx_bd_queues_entry) +
	       sizeof(struct uec_rx_pref_bds));
	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
		 (u32)uec->p_rx_bd_ring);

	/* MFLR */
	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
	/* MINFLR */
	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
	/* MAXD1 */
	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
	/* MAXD2 */
	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
	/* ECAM_PTR */
	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
	/* L2QT */
	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
	/* L3QT */
	for (i = 0; i < 8; i++)
		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);

	/* VLAN_TYPE */
	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
	/* TCI */
	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);

	/* Clear PQ2 style address filtering hash table */
	p_af_pram = (struct uec_82xx_add_filtering_pram *)
		uec->p_rx_glbl_pram->addressfiltering;

	p_af_pram->iaddr_h = 0;
	p_af_pram->iaddr_l = 0;
	p_af_pram->gaddr_h = 0;
	p_af_pram->gaddr_l = 0;
}

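/*
 * Build the page for the QE_INIT_TX_RX host command: every Rx/Tx
 * thread entry packs a serial number (snum) identifying a QE thread,
 * the MURAM offset of that thread's parameter RAM and the RISC
 * allocation bits; the command then hands the whole page to the QE.
 */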
static int uec_issue_init_enet_rxtx_cmd(struct uec_priv *uec,
					int thread_tx, int thread_rx)
{
	struct uec_init_cmd_pram *p_init_enet_param;
	u32 init_enet_param_offset;
	struct uec_inf *uec_info;
	struct ucc_fast_inf *uf_info;
	int i;
	int snum;
	u32 off;
	u32 entry_val;
	u32 command;
	u32 cecr_subblock;

	uec_info = uec->uec_info;
	uf_info = &uec_info->uf_info;

	/* Allocate init enet command parameter */
	uec->init_enet_param_offset =
		qe_muram_alloc(sizeof(struct uec_init_cmd_pram), 4);
	init_enet_param_offset = uec->init_enet_param_offset;
	uec->p_init_enet_param = (struct uec_init_cmd_pram *)
		qe_muram_addr(uec->init_enet_param_offset);

	/* Zero init enet command struct */
	memset((void *)uec->p_init_enet_param, 0,
	       sizeof(struct uec_init_cmd_pram));

	/* Init the command struct */
	p_init_enet_param = uec->p_init_enet_param;
	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
	p_init_enet_param->largestexternallookupkeysize = 0;

	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
					     << ENET_INIT_PARAM_RGF_SHIFT;
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
					     << ENET_INIT_PARAM_TGF_SHIFT;

	/* Init Rx global parameter pointer */
	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
					     (u32)uec_info->risc_rx;

	/* Init Rx threads */
	for (i = 0; i < (thread_rx + 1); i++) {
		snum = qe_get_snum();
		if (snum < 0) {
			printf("%s: cannot get snum\n", __func__);
			return -ENOMEM;
		}

		if (i == 0) {
			off = 0;
		} else {
			off = qe_muram_alloc(sizeof(struct uec_thread_rx_pram),
					     UEC_THREAD_RX_PRAM_ALIGNMENT);
		}

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
			    off | (u32)uec_info->risc_rx;
		p_init_enet_param->rxthread[i] = entry_val;
	}

	/* Init Tx global parameter pointer */
	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
				      (u32)uec_info->risc_tx;

	/* Init Tx threads */
	for (i = 0; i < thread_tx; i++) {
		snum = qe_get_snum();
		if (snum < 0) {
			printf("%s: cannot get snum\n", __func__);
			return -ENOMEM;
		}

		off = qe_muram_alloc(sizeof(struct uec_thread_tx_pram),
				     UEC_THREAD_TX_PRAM_ALIGNMENT);

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
			    off | (u32)uec_info->risc_tx;
		p_init_enet_param->txthread[i] = entry_val;
	}

	__asm__ __volatile__("sync");

	/* Issue QE command */
	command = QE_INIT_TX_RX;
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8)QE_CR_PROTOCOL_ETHERNET,
		     init_enet_param_offset);

	return 0;
}

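/*
 * One-time controller bring-up: validate the BD ring geometry,
 * initialize the UCC fast block and MAC registers, allocate and wire
 * up the Tx/Rx BD rings and Rx buffers, then download the global
 * parameter RAM and start the microcode via the init-enet command.
 */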
static int uec_startup(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct uec_priv *uec = priv->uec;
	struct uec_inf *uec_info;
	struct ucc_fast_inf *uf_info;
	struct ucc_fast_priv *uccf;
	ucc_fast_t *uf_regs;
	uec_t *uec_regs;
	int num_threads_tx;
	int num_threads_rx;
	u32 utbipar;
	u32 length;
	u32 align;
	struct buffer_descriptor *bd;
	u8 *buf;
	int i;

	uec_info = uec->uec_info;
	uf_info = &uec_info->uf_info;

	/* Check if Rx BD ring len is illegal */
	if (uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN ||
	    uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT) {
		printf("%s: Rx BD ring len must be multiple of 4, and > 8.\n",
		       __func__);
		return -EINVAL;
	}

	/* Check if Tx BD ring len is illegal */
	if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
		printf("%s: Tx BD ring length must not be smaller than 2.\n",
		       __func__);
		return -EINVAL;
	}

	/* Check if MRBLR is illegal */
	if (MAX_RXBUF_LEN == 0 || (MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT)) {
		printf("%s: max rx buffer length must be multiple of 128.\n",
		       __func__);
		return -EINVAL;
	}

	/* Both Rx and Tx are stopped */
	uec->grace_stopped_rx = 1;
	uec->grace_stopped_tx = 1;

	/* Init UCC fast */
	if (ucc_fast_init(uf_info, &uccf)) {
		printf("%s: failed to init ucc fast\n", __func__);
		return -ENOMEM;
	}

	/* Save uccf */
	uec->uccf = uccf;

	/* Convert the Tx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_tx,
				    &num_threads_tx))
		return -EINVAL;

	/* Convert the Rx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_rx,
				    &num_threads_rx))
		return -EINVAL;

	uf_regs = uccf->uf_regs;

	/* The UEC registers follow the UCC fast registers */
	uec_regs = (uec_t *)(&uf_regs->ucc_eth);

	/* Save the UEC register pointer to UEC private struct */
	uec->uec_regs = uec_regs;

	/* Init UPSMR, enable hardware statistics (UCC) */
	out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);

	/* Init MACCFG1, flow control disable, disable Tx and Rx */
	out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);

	/* Init MACCFG2, length check, MAC PAD and CRC enable */
	out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);

	/* Setup UTBIPAR */
	utbipar = in_be32(&uec_regs->utbipar);
	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;

	/*
	 * Initialize UTBIPAR address to CONFIG_UTBIPAR_INIT_TBIPA for ALL UEC.
	 * This frees up the remaining SMI addresses for use.
	 */
	utbipar |= CONFIG_UTBIPAR_INIT_TBIPA << UTBIPAR_PHY_ADDRESS_SHIFT;
	out_be32(&uec_regs->utbipar, utbipar);

	/* Allocate Tx BDs */
	length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
		  UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
	    UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
		length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;

	align = UEC_TX_BD_RING_ALIGNMENT;
	uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
	if (!uec->tx_bd_ring_offset)
		return -ENOMEM;
	uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
				   & ~(align - 1));

	/* Zero all of Tx BDs */
	memset((void *)(uec->tx_bd_ring_offset), 0, length + align);

	/* Allocate Rx BDs */
	length = uec_info->rx_bd_ring_len * SIZEOFBD;
	align = UEC_RX_BD_RING_ALIGNMENT;
	uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
	if (!uec->rx_bd_ring_offset)
		return -ENOMEM;
	uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
				   & ~(align - 1));

	/* Zero all of Rx BDs */
	memset((void *)(uec->rx_bd_ring_offset), 0, length + align);

	/* Allocate Rx buffer */
	length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
	align = UEC_RX_DATA_BUF_ALIGNMENT;
	uec->rx_buf_offset = (u32)malloc(length + align);
	if (!uec->rx_buf_offset)
		return -ENOMEM;
	uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
			       & ~(align - 1));

	/* Zero all of the Rx buffer */
	memset((void *)(uec->rx_buf_offset), 0, length + align);

	/* Init TxBD ring */
	bd = (struct buffer_descriptor *)uec->p_tx_bd_ring;
	uec->tx_bd = bd;

	for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
		BD_DATA_CLEAR(bd);
		BD_STATUS_SET(bd, 0);
		BD_LENGTH_SET(bd, 0);
		bd++;
	}
	BD_STATUS_SET((--bd), TX_BD_WRAP);

	/* Init RxBD ring */
	bd = (struct buffer_descriptor *)uec->p_rx_bd_ring;
	uec->rx_bd = bd;
	buf = uec->p_rx_buf;

	for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
		BD_DATA_SET(bd, buf);
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, RX_BD_EMPTY);
		buf += MAX_RXBUF_LEN;
		bd++;
	}
	BD_STATUS_SET((--bd), RX_BD_WRAP | RX_BD_EMPTY);

	/* Init global Tx parameter RAM */
	uec_init_tx_parameter(uec, num_threads_tx);

	/* Init global Rx parameter RAM */
	uec_init_rx_parameter(uec, num_threads_rx);

	/* Init ethernet Tx and Rx parameter command */
	if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
					 num_threads_rx)) {
		printf("%s issue init enet cmd failed\n", __func__);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Convert a string to a QE clock source enum
 *
 * This function takes a string, typically from a property in the device
 * tree, and returns the corresponding "enum qe_clock" value.
 */
enum qe_clock qe_clock_source(const char *source)
{
	unsigned int i;

	if (strcasecmp(source, "none") == 0)
		return QE_CLK_NONE;

	if (strncasecmp(source, "brg", 3) == 0) {
		i = simple_strtoul(source + 3, NULL, 10);
		if (i >= 1 && i <= 16)
			return (QE_BRG1 - 1) + i;
		else
			return QE_CLK_DUMMY;
	}

	if (strncasecmp(source, "clk", 3) == 0) {
		i = simple_strtoul(source + 3, NULL, 10);
		if (i >= 1 && i <= 24)
			return (QE_CLK1 - 1) + i;
		else
			return QE_CLK_DUMMY;
	}

	return QE_CLK_DUMMY;
}

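/*
 * Example (assuming the usual contiguous qe_clock enum layout):
 * qe_clock_source("brg7") returns QE_BRG7, qe_clock_source("clk21")
 * returns QE_CLK21, and an out-of-range name such as "brg17" falls
 * back to QE_CLK_DUMMY.
 */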
static void qe_uec_set_eth_type(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct uec_priv *uec = priv->uec;
	struct uec_inf *uec_info = uec->uec_info;
	struct ucc_fast_inf *uf_info = &uec_info->uf_info;

	switch (uec_info->enet_interface_type) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_TBI:
	case PHY_INTERFACE_MODE_RTBI:
	case PHY_INTERFACE_MODE_SGMII:
		uf_info->eth_type = GIGA_ETH;
		break;
	default:
		uf_info->eth_type = FAST_ETH;
		break;
	}
}

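/*
 * The properties parsed below come from a UCC ethernet node in the
 * device tree. A sketch of such a node, with illustrative values
 * only (the property names are the ones this parser expects):
 *
 *	ucc@2000 {
 *		compatible = "ucc_geth";
 *		cell-index = <1>;
 *		rx-clock-name = "none";
 *		tx-clock-name = "clk12";
 *		phy-connection-type = "rgmii-id";
 *	};
 */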
static int qe_uec_set_uec_info(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct uec_priv *uec = priv->uec;
	struct uec_inf *uec_info;
	struct ucc_fast_inf *uf_info;
	const char *s;
	int ret;
	u32 val;

	uec_info = (struct uec_inf *)malloc(sizeof(struct uec_inf));
	if (!uec_info)
		return -ENOMEM;

	uf_info = &uec_info->uf_info;

	ret = dev_read_u32(dev, "cell-index", &val);
	if (ret) {
		ret = dev_read_u32(dev, "device-id", &val);
		if (ret) {
			pr_err("neither cell-index nor device-id found!\n");
			goto out;
		}
	}

	uf_info->ucc_num = val - 1;
	if (uf_info->ucc_num < 0 || uf_info->ucc_num > 7) {
		ret = -ENODEV;
		goto out;
	}

	ret = dev_read_string_index(dev, "rx-clock-name", 0, &s);
	if (!ret) {
		uf_info->rx_clock = qe_clock_source(s);
		if (uf_info->rx_clock < QE_CLK_NONE ||
		    uf_info->rx_clock > QE_CLK24) {
			pr_err("invalid rx-clock-name property\n");
			ret = -EINVAL;
			goto out;
		}
	} else {
		ret = dev_read_u32(dev, "rx-clock", &val);
		if (ret) {
			/*
			 * If both rx-clock-name and rx-clock are missing,
			 * we want to tell people to use rx-clock-name.
			 */
			pr_err("missing rx-clock-name property\n");
			goto out;
		}
		if (val < QE_CLK_NONE || val > QE_CLK24) {
			pr_err("invalid rx-clock property\n");
			ret = -EINVAL;
			goto out;
		}
		uf_info->rx_clock = val;
	}

	ret = dev_read_string_index(dev, "tx-clock-name", 0, &s);
	if (!ret) {
		uf_info->tx_clock = qe_clock_source(s);
		if (uf_info->tx_clock < QE_CLK_NONE ||
		    uf_info->tx_clock > QE_CLK24) {
			pr_err("invalid tx-clock-name property\n");
			ret = -EINVAL;
			goto out;
		}
	} else {
		ret = dev_read_u32(dev, "tx-clock", &val);
		if (ret) {
			pr_err("missing tx-clock-name property\n");
			goto out;
		}
		if (val < QE_CLK_NONE || val > QE_CLK24) {
			pr_err("invalid tx-clock property\n");
			ret = -EINVAL;
			goto out;
		}
		uf_info->tx_clock = val;
	}

	uec_info->num_threads_tx = UEC_NUM_OF_THREADS_1;
	uec_info->num_threads_rx = UEC_NUM_OF_THREADS_1;
	uec_info->risc_tx = QE_RISC_ALLOCATION_RISC1_AND_RISC2;
	uec_info->risc_rx = QE_RISC_ALLOCATION_RISC1_AND_RISC2;
	uec_info->tx_bd_ring_len = 16;
	uec_info->rx_bd_ring_len = 16;
#if (MAX_QE_RISC == 4)
	uec_info->risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS;
	uec_info->risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS;
#endif
	uec_info->enet_interface_type = pdata->phy_interface;

	uec->uec_info = uec_info;
	qe_uec_set_eth_type(dev);

	return 0;
out:
	free(uec_info);
	return ret;
}

static int qe_uec_probe(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct uec_priv *uec;
	int ret;

	/* Allocate the UEC private struct */
	uec = (struct uec_priv *)malloc(sizeof(struct uec_priv));
	if (!uec)
		return -ENOMEM;

	memset(uec, 0, sizeof(struct uec_priv));
	priv->uec = uec;
	uec->uec_regs = (uec_t *)pdata->iobase;

	/* setup uec info struct */
	ret = qe_uec_set_uec_info(dev);
	if (ret) {
		free(uec);
		return ret;
	}

	ret = uec_startup(dev);
	if (ret) {
		free(uec->uec_info);
		free(uec);
		return ret;
	}

	priv->phydev = dm_eth_phy_connect(dev);
	return 0;
}

/*
 * Remove the driver from an interface:
 * - free up allocated memory
 */
static int qe_uec_remove(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);

	/* uec_info was allocated separately in qe_uec_set_uec_info() */
	free(priv->uec->uec_info);
	free(priv->uec);

	return 0;
}

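/*
 * Read iobase and the PHY interface mode from the device tree;
 * "phy-connection-type" is the property this driver evaluates (see
 * the node sketch above qe_uec_set_uec_info()).
 */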
static int qe_uec_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;

	pdata->iobase = (phys_addr_t)devfdt_get_addr(dev);

	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev),
			       "phy-connection-type", NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	return 0;
}

static const struct udevice_id qe_uec_ids[] = {
	{ .compatible = QE_UEC_DRIVER_NAME },
	{ }
};

U_BOOT_DRIVER(eth_qe_uec) = {
	.name = QE_UEC_DRIVER_NAME,
	.id = UCLASS_ETH,
	.of_match = qe_uec_ids,
	.ofdata_to_platdata = qe_uec_ofdata_to_platdata,
	.probe = qe_uec_probe,
	.remove = qe_uec_remove,
	.ops = &qe_uec_eth_ops,
	.priv_auto_alloc_size = sizeof(struct qe_uec_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};