sh_eth.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * sh_eth.c - Driver for Renesas ethernet controller.
 *
 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
 * Copyright (C) 2013, 2014 Renesas Electronics Corporation
 */
#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <env.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <miiphy.h>
#include <linux/errno.h>
#include <asm/io.h>

#ifdef CONFIG_DM_ETH
#include <clk.h>
#include <dm.h>
#include <linux/mii.h>
#include <asm/gpio.h>
#endif

#include "sh_eth.h"

#ifndef CONFIG_SH_ETHER_USE_PORT
# error "Please define CONFIG_SH_ETHER_USE_PORT"
#endif
#ifndef CONFIG_SH_ETHER_PHY_ADDR
# error "Please define CONFIG_SH_ETHER_PHY_ADDR"
#endif

#if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && \
        !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len)    \
        flush_dcache_range((unsigned long)addr, \
                           (unsigned long)(addr + ALIGN(len, CONFIG_SH_ETHER_ALIGNE_SIZE)))
#else
#define flush_cache_wback(...)
#endif

#if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len)                                     \
        {                                                               \
                unsigned long line_size = CONFIG_SH_ETHER_ALIGNE_SIZE;  \
                unsigned long start, end;                               \
                                                                        \
                start = (unsigned long)addr;                            \
                end = start + len;                                      \
                start &= ~(line_size - 1);                              \
                end = ((end + line_size - 1) & ~(line_size - 1));       \
                                                                        \
                invalidate_dcache_range(start, end);                    \
        }
#else
#define invalidate_cache(...)
#endif

#define TIMEOUT_CNT 1000
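/*
 * Queue one frame on the current TX descriptor, kick the E-DMAC
 * transmitter if it is idle, and poll TD_TACT until the frame has been
 * sent or TIMEOUT_CNT polls have expired.
 */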
static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
{
        int ret = 0, timeout;
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

        if (!packet || len > 0xffff) {
                printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
                ret = -EINVAL;
                goto err;
        }

        /* packet must be on a 4 byte boundary */
        if ((uintptr_t)packet & 3) {
                printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n",
                       __func__);
                ret = -EFAULT;
                goto err;
        }

        /* Update tx descriptor */
        flush_cache_wback(packet, len);
        port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
        port_info->tx_desc_cur->td1 = len << 16;
        /* Must preserve the end of descriptor list indication */
        if (port_info->tx_desc_cur->td0 & TD_TDLE)
                port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
        else
                port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

        flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));

        /* Restart the transmitter if disabled */
        if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
                sh_eth_write(port_info, EDTRR_TRNS, EDTRR);

        /* Wait until packet is transmitted */
        timeout = TIMEOUT_CNT;
        do {
                invalidate_cache(port_info->tx_desc_cur,
                                 sizeof(struct tx_desc_s));
                udelay(100);
        } while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

        if (timeout < 0) {
                printf(SHETHER_NAME ": transmit timeout\n");
                ret = -ETIMEDOUT;
                goto err;
        }

        port_info->tx_desc_cur++;
        if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
                port_info->tx_desc_cur = port_info->tx_desc_base;

err:
        return ret;
}
static int sh_eth_recv_start(struct sh_eth_dev *eth)
{
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

        /* Check if the rx descriptor is ready */
        invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
        if (port_info->rx_desc_cur->rd0 & RD_RACT)
                return -EINVAL;

        /* Check for errors */
        if (port_info->rx_desc_cur->rd0 & RD_RFE)
                return -EINVAL;

        return port_info->rx_desc_cur->rd1 & 0xffff;
}

static void sh_eth_recv_finish(struct sh_eth_dev *eth)
{
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

        /* Make current descriptor available again */
        if (port_info->rx_desc_cur->rd0 & RD_RDLE)
                port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
        else
                port_info->rx_desc_cur->rd0 = RD_RACT;

        flush_cache_wback(port_info->rx_desc_cur,
                          sizeof(struct rx_desc_s));

        /* Point to the next descriptor */
        port_info->rx_desc_cur++;
        if (port_info->rx_desc_cur >=
            port_info->rx_desc_base + NUM_RX_DESC)
                port_info->rx_desc_cur = port_info->rx_desc_base;
}
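/*
 * Reset the E-DMAC. GETHER/RZ variants set EDMR_SRST and poll for the
 * bit to clear; the other variants simply pulse the bit with a short
 * delay.
 */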
static int sh_eth_reset(struct sh_eth_dev *eth)
{
        struct sh_eth_info *port_info = &eth->port_info[eth->port];
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
        int ret = 0, i;

        /* Start e-dmac transmitter and receiver */
        sh_eth_write(port_info, EDSR_ENALL, EDSR);

        /* Perform a software reset and wait for it to complete */
        sh_eth_write(port_info, EDMR_SRST, EDMR);
        for (i = 0; i < TIMEOUT_CNT; i++) {
                if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
                        break;
                udelay(1000);
        }

        if (i == TIMEOUT_CNT) {
                printf(SHETHER_NAME ": Software reset timeout\n");
                ret = -EIO;
        }

        return ret;
#else
        sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
        mdelay(3);
        sh_eth_write(port_info,
                     sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);

        return 0;
#endif
}
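/*
 * Allocate the TX descriptor ring, clear it, mark the last entry with
 * TD_TDLE and point the controller at its physical base address.
 */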
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
        int i, ret = 0;
        u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
        struct sh_eth_info *port_info = &eth->port_info[eth->port];
        struct tx_desc_s *cur_tx_desc;

        /*
         * Allocate tx descriptors. They must be aligned to size of struct
         * tx_desc_s.
         */
        port_info->tx_desc_alloc =
                memalign(sizeof(struct tx_desc_s), alloc_desc_size);
        if (!port_info->tx_desc_alloc) {
                printf(SHETHER_NAME ": memalign failed\n");
                ret = -ENOMEM;
                goto err;
        }

        flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);

        /* Make sure we use a P2 address (non-cacheable) */
        port_info->tx_desc_base =
                (struct tx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->tx_desc_alloc);
        port_info->tx_desc_cur = port_info->tx_desc_base;

        /* Initialize all descriptors */
        for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
             cur_tx_desc++, i++) {
                cur_tx_desc->td0 = 0x00;
                cur_tx_desc->td1 = 0x00;
                cur_tx_desc->td2 = 0x00;
        }

        /* Mark the end of the descriptors */
        cur_tx_desc--;
        cur_tx_desc->td0 |= TD_TDLE;

        /*
         * Point the controller to the tx descriptor list. Must use physical
         * addresses
         */
        sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
        sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
        sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
        sh_eth_write(port_info, 0x01, TDFFR); /* Last descriptor bit */
#endif

err:
        return ret;
}
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
        int i, ret = 0;
        u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
        struct sh_eth_info *port_info = &eth->port_info[eth->port];
        struct rx_desc_s *cur_rx_desc;
        u8 *rx_buf;

        /*
         * Allocate rx descriptors. They must be aligned to size of struct
         * rx_desc_s.
         */
        port_info->rx_desc_alloc =
                memalign(sizeof(struct rx_desc_s), alloc_desc_size);
        if (!port_info->rx_desc_alloc) {
                printf(SHETHER_NAME ": memalign failed\n");
                ret = -ENOMEM;
                goto err;
        }

        flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

        /* Make sure we use a P2 address (non-cacheable) */
        port_info->rx_desc_base =
                (struct rx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_alloc);
        port_info->rx_desc_cur = port_info->rx_desc_base;

        /*
         * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
         * aligned and in P2 area.
         */
        port_info->rx_buf_alloc =
                memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
        if (!port_info->rx_buf_alloc) {
                printf(SHETHER_NAME ": alloc failed\n");
                ret = -ENOMEM;
                goto err_buf_alloc;
        }

        port_info->rx_buf_base = (u8 *)ADDR_TO_P2((uintptr_t)port_info->rx_buf_alloc);

        /* Initialize all descriptors */
        for (cur_rx_desc = port_info->rx_desc_base,
             rx_buf = port_info->rx_buf_base, i = 0;
             i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
                cur_rx_desc->rd0 = RD_RACT;
                cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
                cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
        }

        /* Mark the end of the descriptors */
        cur_rx_desc--;
        cur_rx_desc->rd0 |= RD_RDLE;

        /* Point the controller to the rx descriptor list */
        sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
        sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
        sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
        sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
#endif

        return ret;

err_buf_alloc:
        free(port_info->rx_desc_alloc);
        port_info->rx_desc_alloc = NULL;

err:
        return ret;
}
static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
{
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

        if (port_info->tx_desc_alloc) {
                free(port_info->tx_desc_alloc);
                port_info->tx_desc_alloc = NULL;
        }
}

static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
{
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

        if (port_info->rx_desc_alloc) {
                free(port_info->rx_desc_alloc);
                port_info->rx_desc_alloc = NULL;
        }

        if (port_info->rx_buf_alloc) {
                free(port_info->rx_buf_alloc);
                port_info->rx_buf_alloc = NULL;
        }
}

static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
        int ret = 0;

        ret = sh_eth_tx_desc_init(eth);
        if (ret)
                goto err_tx_init;

        ret = sh_eth_rx_desc_init(eth);
        if (ret)
                goto err_rx_init;

        return ret;

err_rx_init:
        sh_eth_tx_desc_free(eth);

err_tx_init:
        return ret;
}
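/* Load the station MAC address into the MAHR/MALR register pair */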
static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
                                unsigned char *mac)
{
        u32 val;

        val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
        sh_eth_write(port_info, val, MAHR);

        val = (mac[4] << 8) | mac[5];
        sh_eth_write(port_info, val, MALR);
}
static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
{
        struct sh_eth_info *port_info = &eth->port_info[eth->port];
        unsigned long edmr;

        /* Configure e-dmac registers */
        edmr = sh_eth_read(port_info, EDMR);
        edmr &= ~EMDR_DESC_R;
        edmr |= EMDR_DESC | EDMR_EL;
#if defined(CONFIG_R8A77980)
        edmr |= EDMR_NBST;
#endif
        sh_eth_write(port_info, edmr, EDMR);

        sh_eth_write(port_info, 0, EESIPR);
        sh_eth_write(port_info, 0, TRSCER);
        sh_eth_write(port_info, 0, TFTR);
        sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
        sh_eth_write(port_info, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
        sh_eth_write(port_info, 0, RPADIR);
#endif
        sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

        /* Configure e-mac registers */
        sh_eth_write(port_info, 0, ECSIPR);

        /* Set MAC address */
        sh_eth_write_hwaddr(port_info, mac);

        sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
        sh_eth_write(port_info, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
        sh_eth_write(port_info, APR_AP, APR);
        sh_eth_write(port_info, MPR_MP, MPR);
        sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
        sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
        sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
#endif
}
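/*
 * Propagate the speed and duplex negotiated by the PHY into the
 * GECMR/RTRATE/ECMR registers, depending on the SoC variant.
 */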
static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
{
        struct sh_eth_info *port_info = &eth->port_info[eth->port];
        struct phy_device *phy = port_info->phydev;
        int ret = 0;
        u32 val = 0;

        /* Set the transfer speed */
        if (phy->speed == 100) {
                printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
                sh_eth_write(port_info, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
                sh_eth_write(port_info, 1, RTRATE);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
                val = ECMR_RTM;
#endif
        } else if (phy->speed == 10) {
                printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
                sh_eth_write(port_info, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
                sh_eth_write(port_info, 0, RTRATE);
#endif
        }
#if defined(SH_ETH_TYPE_GETHER)
        else if (phy->speed == 1000) {
                printf(SHETHER_NAME ": 1000Base/");
                sh_eth_write(port_info, GECMR_1000B, GECMR);
        }
#endif

        /* Check if full duplex mode is supported by the phy */
        if (phy->duplex) {
                printf("Full\n");
                sh_eth_write(port_info,
                             val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
                             ECMR);
        } else {
                printf("Half\n");
                sh_eth_write(port_info,
                             val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
                             ECMR);
        }

        return ret;
}
static void sh_eth_start(struct sh_eth_dev *eth)
{
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

        /*
         * Enable the e-dmac receiver only. The transmitter will be enabled
         * when we have something to transmit
         */
        sh_eth_write(port_info, EDRRR_R, EDRRR);
}

static void sh_eth_stop(struct sh_eth_dev *eth)
{
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

        sh_eth_write(port_info, ~EDRRR_R, EDRRR);
}

static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
{
        int ret = 0;

        ret = sh_eth_reset(eth);
        if (ret)
                return ret;

        ret = sh_eth_desc_init(eth);
        if (ret)
                return ret;

        sh_eth_mac_regs_config(eth, mac);

        return 0;
}

static int sh_eth_start_common(struct sh_eth_dev *eth)
{
        struct sh_eth_info *port_info = &eth->port_info[eth->port];
        int ret;

        ret = phy_startup(port_info->phydev);
        if (ret) {
                printf(SHETHER_NAME ": phy startup failure\n");
                return ret;
        }

        ret = sh_eth_phy_regs_config(eth);
        if (ret)
                return ret;

        sh_eth_start(eth);

        return 0;
}
#ifndef CONFIG_DM_ETH
static int sh_eth_phy_config_legacy(struct sh_eth_dev *eth)
{
        int ret = 0;
        struct sh_eth_info *port_info = &eth->port_info[eth->port];
        struct eth_device *dev = port_info->dev;
        struct phy_device *phydev;

        phydev = phy_connect(miiphy_get_dev_by_name(dev->name),
                             port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
        port_info->phydev = phydev;
        phy_config(phydev);

        return ret;
}

static int sh_eth_send_legacy(struct eth_device *dev, void *packet, int len)
{
        struct sh_eth_dev *eth = dev->priv;

        return sh_eth_send_common(eth, packet, len);
}

static int sh_eth_recv_common(struct sh_eth_dev *eth)
{
        int len = 0;
        struct sh_eth_info *port_info = &eth->port_info[eth->port];
        uchar *packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);

        len = sh_eth_recv_start(eth);
        if (len > 0) {
                invalidate_cache(packet, len);
                net_process_received_packet(packet, len);
                sh_eth_recv_finish(eth);
        } else {
                len = 0;
        }

        /* Restart the receiver if disabled */
        if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
                sh_eth_write(port_info, EDRRR_R, EDRRR);

        return len;
}

static int sh_eth_recv_legacy(struct eth_device *dev)
{
        struct sh_eth_dev *eth = dev->priv;

        return sh_eth_recv_common(eth);
}

static int sh_eth_init_legacy(struct eth_device *dev, bd_t *bd)
{
        struct sh_eth_dev *eth = dev->priv;
        int ret;

        ret = sh_eth_init_common(eth, dev->enetaddr);
        if (ret)
                return ret;

        ret = sh_eth_phy_config_legacy(eth);
        if (ret) {
                printf(SHETHER_NAME ": phy config timeout\n");
                goto err_start;
        }

        ret = sh_eth_start_common(eth);
        if (ret)
                goto err_start;

        return 0;

err_start:
        sh_eth_tx_desc_free(eth);
        sh_eth_rx_desc_free(eth);

        return ret;
}

void sh_eth_halt_legacy(struct eth_device *dev)
{
        struct sh_eth_dev *eth = dev->priv;

        sh_eth_stop(eth);
}
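/*
 * Legacy (non-DM) entry point: allocate the eth_device, hook up its
 * callbacks, register it with the network core and set up the
 * bit-banged MDIO bus.
 */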
int sh_eth_initialize(bd_t *bd)
{
        int ret = 0;
        struct sh_eth_dev *eth = NULL;
        struct eth_device *dev = NULL;
        struct mii_dev *mdiodev;

        eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
        if (!eth) {
                printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
                ret = -ENOMEM;
                goto err;
        }

        dev = (struct eth_device *)malloc(sizeof(struct eth_device));
        if (!dev) {
                printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
                ret = -ENOMEM;
                goto err;
        }
        memset(dev, 0, sizeof(struct eth_device));
        memset(eth, 0, sizeof(struct sh_eth_dev));

        eth->port = CONFIG_SH_ETHER_USE_PORT;
        eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
        eth->port_info[eth->port].iobase =
                (void __iomem *)(BASE_IO_ADDR + 0x800 * eth->port);

        dev->priv = (void *)eth;
        dev->iobase = 0;
        dev->init = sh_eth_init_legacy;
        dev->halt = sh_eth_halt_legacy;
        dev->send = sh_eth_send_legacy;
        dev->recv = sh_eth_recv_legacy;
        eth->port_info[eth->port].dev = dev;

        strcpy(dev->name, SHETHER_NAME);

        /* Register the device with the Ethernet subsystem */
        eth_register(dev);

        bb_miiphy_buses[0].priv = eth;
        mdiodev = mdio_alloc();
        if (!mdiodev)
                return -ENOMEM;
        strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
        mdiodev->read = bb_miiphy_read;
        mdiodev->write = bb_miiphy_write;

        ret = mdio_register(mdiodev);
        if (ret < 0)
                return ret;

        if (!eth_env_get_enetaddr("ethaddr", dev->enetaddr))
                puts("Please set MAC address\n");

        return ret;

err:
        if (dev)
                free(dev);

        if (eth)
                free(eth);

        printf(SHETHER_NAME ": Failed\n");
        return ret;
}
#else /* CONFIG_DM_ETH */

struct sh_ether_priv {
        struct sh_eth_dev       shdev;

        struct mii_dev          *bus;
        phys_addr_t             iobase;
        struct clk              clk;
        struct gpio_desc        reset_gpio;
};

static int sh_ether_send(struct udevice *dev, void *packet, int len)
{
        struct sh_ether_priv *priv = dev_get_priv(dev);
        struct sh_eth_dev *eth = &priv->shdev;

        return sh_eth_send_common(eth, packet, len);
}
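/*
 * DM receive hook: hand a ready frame to the uclass, or restart the
 * receiver and return -EAGAIN when nothing is pending.
 */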
static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
{
        struct sh_ether_priv *priv = dev_get_priv(dev);
        struct sh_eth_dev *eth = &priv->shdev;
        struct sh_eth_info *port_info = &eth->port_info[eth->port];
        uchar *packet = (uchar *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_cur->rd2);
        int len;

        len = sh_eth_recv_start(eth);
        if (len > 0) {
                invalidate_cache(packet, len);
                *packetp = packet;

                return len;
        } else {
                len = 0;

                /* Restart the receiver if disabled */
                if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
                        sh_eth_write(port_info, EDRRR_R, EDRRR);

                return -EAGAIN;
        }
}

static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
{
        struct sh_ether_priv *priv = dev_get_priv(dev);
        struct sh_eth_dev *eth = &priv->shdev;
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

        sh_eth_recv_finish(eth);

        /* Restart the receiver if disabled */
        if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
                sh_eth_write(port_info, EDRRR_R, EDRRR);

        return 0;
}

static int sh_ether_write_hwaddr(struct udevice *dev)
{
        struct sh_ether_priv *priv = dev_get_priv(dev);
        struct sh_eth_dev *eth = &priv->shdev;
        struct sh_eth_info *port_info = &eth->port_info[eth->port];
        struct eth_pdata *pdata = dev_get_platdata(dev);

        sh_eth_write_hwaddr(port_info, pdata->enetaddr);

        return 0;
}
static int sh_eth_phy_config(struct udevice *dev)
{
        struct sh_ether_priv *priv = dev_get_priv(dev);
        struct eth_pdata *pdata = dev_get_platdata(dev);
        struct sh_eth_dev *eth = &priv->shdev;
        int ret = 0;
        struct sh_eth_info *port_info = &eth->port_info[eth->port];
        struct phy_device *phydev;
        int mask = 0xffffffff;

        phydev = phy_find_by_mask(priv->bus, mask, pdata->phy_interface);
        if (!phydev)
                return -ENODEV;

        phy_connect_dev(phydev, dev);

        port_info->phydev = phydev;
        phy_config(phydev);

        return ret;
}

static int sh_ether_start(struct udevice *dev)
{
        struct sh_ether_priv *priv = dev_get_priv(dev);
        struct eth_pdata *pdata = dev_get_platdata(dev);
        struct sh_eth_dev *eth = &priv->shdev;
        int ret;

        ret = sh_eth_init_common(eth, pdata->enetaddr);
        if (ret)
                return ret;

        ret = sh_eth_start_common(eth);
        if (ret)
                goto err_start;

        return 0;

err_start:
        sh_eth_tx_desc_free(eth);
        sh_eth_rx_desc_free(eth);
        return ret;
}

static void sh_ether_stop(struct udevice *dev)
{
        struct sh_ether_priv *priv = dev_get_priv(dev);
        struct sh_eth_dev *eth = &priv->shdev;
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

        phy_shutdown(port_info->phydev);
        sh_eth_stop(&priv->shdev);
}
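/*
 * Probe: pick up the clock (when the CLK framework is enabled) and the
 * optional reset GPIO from the device tree, register the bit-banged
 * MDIO bus and configure the PHY.
 */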
static int sh_ether_probe(struct udevice *udev)
{
        struct eth_pdata *pdata = dev_get_platdata(udev);
        struct sh_ether_priv *priv = dev_get_priv(udev);
        struct sh_eth_dev *eth = &priv->shdev;
        struct ofnode_phandle_args phandle_args;
        struct mii_dev *mdiodev;
        int ret;

        priv->iobase = pdata->iobase;

#if CONFIG_IS_ENABLED(CLK)
        ret = clk_get_by_index(udev, 0, &priv->clk);
        if (ret < 0)
                return ret;
#endif

        ret = dev_read_phandle_with_args(udev, "phy-handle", NULL, 0, 0,
                                         &phandle_args);
        if (!ret) {
                gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0,
                                           &priv->reset_gpio, GPIOD_IS_OUT);
        }

        if (!dm_gpio_is_valid(&priv->reset_gpio)) {
                gpio_request_by_name(udev, "reset-gpios", 0, &priv->reset_gpio,
                                     GPIOD_IS_OUT);
        }

        mdiodev = mdio_alloc();
        if (!mdiodev) {
                ret = -ENOMEM;
                return ret;
        }

        mdiodev->read = bb_miiphy_read;
        mdiodev->write = bb_miiphy_write;
        bb_miiphy_buses[0].priv = eth;
        snprintf(mdiodev->name, sizeof(mdiodev->name), "%s", udev->name);

        ret = mdio_register(mdiodev);
        if (ret < 0)
                goto err_mdio_register;

        priv->bus = miiphy_get_dev_by_name(udev->name);

        eth->port = CONFIG_SH_ETHER_USE_PORT;
        eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
        eth->port_info[eth->port].iobase =
                (void __iomem *)(uintptr_t)(BASE_IO_ADDR + 0x800 * eth->port);

#if CONFIG_IS_ENABLED(CLK)
        ret = clk_enable(&priv->clk);
        if (ret)
                goto err_mdio_register;
#endif

        ret = sh_eth_phy_config(udev);
        if (ret) {
                printf(SHETHER_NAME ": phy config timeout\n");
                goto err_phy_config;
        }

        return 0;

err_phy_config:
#if CONFIG_IS_ENABLED(CLK)
        clk_disable(&priv->clk);
#endif
err_mdio_register:
        mdio_free(mdiodev);
        return ret;
}

static int sh_ether_remove(struct udevice *udev)
{
        struct sh_ether_priv *priv = dev_get_priv(udev);
        struct sh_eth_dev *eth = &priv->shdev;
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

#if CONFIG_IS_ENABLED(CLK)
        clk_disable(&priv->clk);
#endif
        free(port_info->phydev);
        mdio_unregister(priv->bus);
        mdio_free(priv->bus);

        if (dm_gpio_is_valid(&priv->reset_gpio))
                dm_gpio_free(udev, &priv->reset_gpio);

        return 0;
}
static const struct eth_ops sh_ether_ops = {
        .start          = sh_ether_start,
        .send           = sh_ether_send,
        .recv           = sh_ether_recv,
        .free_pkt       = sh_ether_free_pkt,
        .stop           = sh_ether_stop,
        .write_hwaddr   = sh_ether_write_hwaddr,
};

int sh_ether_ofdata_to_platdata(struct udevice *dev)
{
        struct eth_pdata *pdata = dev_get_platdata(dev);
        const char *phy_mode;
        const fdt32_t *cell;
        int ret = 0;

        pdata->iobase = devfdt_get_addr(dev);
        pdata->phy_interface = -1;
        phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
                               NULL);
        if (phy_mode)
                pdata->phy_interface = phy_get_interface_by_name(phy_mode);
        if (pdata->phy_interface == -1) {
                debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
                return -EINVAL;
        }

        pdata->max_speed = 1000;
        cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
        if (cell)
                pdata->max_speed = fdt32_to_cpu(*cell);

        sprintf(bb_miiphy_buses[0].name, "%s", dev->name);

        return ret;
}

static const struct udevice_id sh_ether_ids[] = {
        { .compatible = "renesas,ether-r7s72100" },
        { .compatible = "renesas,ether-r8a7790" },
        { .compatible = "renesas,ether-r8a7791" },
        { .compatible = "renesas,ether-r8a7793" },
        { .compatible = "renesas,ether-r8a7794" },
        { .compatible = "renesas,gether-r8a77980" },
        { }
};

U_BOOT_DRIVER(eth_sh_ether) = {
        .name           = "sh_ether",
        .id             = UCLASS_ETH,
        .of_match       = sh_ether_ids,
        .ofdata_to_platdata = sh_ether_ofdata_to_platdata,
        .probe          = sh_ether_probe,
        .remove         = sh_ether_remove,
        .ops            = &sh_ether_ops,
        .priv_auto_alloc_size = sizeof(struct sh_ether_priv),
        .platdata_auto_alloc_size = sizeof(struct eth_pdata),
        .flags          = DM_FLAG_ALLOC_PRIV_DMA,
};
#endif
/******* for bb_miiphy *******/
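/*
 * MII management frames are bit-banged through the PIR register: PIR_MMD
 * selects output drive, PIR_MDO/PIR_MDI carry the data and PIR_MDC is
 * the clock.
 */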
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
        return 0;
}

static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
{
        struct sh_eth_dev *eth = bus->priv;
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

        sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);

        return 0;
}

static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
        struct sh_eth_dev *eth = bus->priv;
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

        sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);

        return 0;
}

static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
        struct sh_eth_dev *eth = bus->priv;
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

        if (v)
                sh_eth_write(port_info,
                             sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
        else
                sh_eth_write(port_info,
                             sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);

        return 0;
}

static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
        struct sh_eth_dev *eth = bus->priv;
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

        *v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;

        return 0;
}

static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
        struct sh_eth_dev *eth = bus->priv;
        struct sh_eth_info *port_info = &eth->port_info[eth->port];

        if (v)
                sh_eth_write(port_info,
                             sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
        else
                sh_eth_write(port_info,
                             sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);

        return 0;
}

static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
        udelay(10);

        return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
        {
                .name           = "sh_eth",
                .init           = sh_eth_bb_init,
                .mdio_active    = sh_eth_bb_mdio_active,
                .mdio_tristate  = sh_eth_bb_mdio_tristate,
                .set_mdio       = sh_eth_bb_set_mdio,
                .get_mdio       = sh_eth_bb_get_mdio,
                .set_mdc        = sh_eth_bb_set_mdc,
                .delay          = sh_eth_bb_delay,
        }
};

int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);