/*
 * sh_eth.c - Driver for Renesas ethernet controller.
 *
 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
 * Copyright (C) 2013, 2014 Renesas Electronics Corporation
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <config.h>
#include <common.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <miiphy.h>
#include <asm/errno.h>
#include <asm/io.h>

#include "sh_eth.h"

#ifndef CONFIG_SH_ETHER_USE_PORT
# error "Please define CONFIG_SH_ETHER_USE_PORT"
#endif

#ifndef CONFIG_SH_ETHER_PHY_ADDR
# error "Please define CONFIG_SH_ETHER_PHY_ADDR"
#endif

#if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && !defined(CONFIG_SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len)	\
	flush_dcache_range((u32)addr, (u32)(addr + len - 1))
#else
#define flush_cache_wback(...)
#endif

#if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len)				\
	{							\
		u32 line_size = CONFIG_SH_ETHER_ALIGNE_SIZE;	\
		u32 start, end;					\
								\
		start = (u32)addr;				\
		end = start + len;				\
		start &= ~(line_size - 1);			\
		end = ((end + line_size - 1) & ~(line_size - 1)); \
								\
		invalidate_dcache_range(start, end);		\
	}
#else
#define invalidate_cache(...)
#endif

#define TIMEOUT_CNT 1000

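/*
 * Transmit path: fill the current tx descriptor with the packet address and
 * length, (re)start the E-DMAC transmitter via EDTRR if it is idle, then
 * poll the TD_TACT bit until the controller releases the descriptor or the
 * timeout expires.
 */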
int sh_eth_send(struct eth_device *dev, void *packet, int len)
{
	struct sh_eth_dev *eth = dev->priv;
	int port = eth->port, ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[port];

	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* packet must be on a 4 byte boundary */
	if ((int)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n",
		       __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor */
	flush_cache_wback(packet, len);
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	port_info->tx_desc_cur->td1 = len << 16;
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(eth, EDTRR) & EDTRR_TRNS))
		sh_eth_write(eth, EDTRR_TRNS, EDTRR);

	/* Wait until packet is transmitted */
	timeout = TIMEOUT_CNT;
	do {
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}

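/*
 * Receive path: if the controller has released the current rx descriptor
 * (RD_RACT clear) and no receive error is flagged, hand the frame to the
 * network stack, give the descriptor back to the controller, and restart
 * the receiver via EDRRR if it has stopped.
 */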
int sh_eth_recv(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;
	int port = eth->port, len = 0;
	struct sh_eth_info *port_info = &eth->port_info[port];
	uchar *packet;

	/* Check if the rx descriptor is ready */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	if (!(port_info->rx_desc_cur->rd0 & RD_RACT)) {
		/* Check for errors */
		if (!(port_info->rx_desc_cur->rd0 & RD_RFE)) {
			len = port_info->rx_desc_cur->rd1 & 0xffff;
			packet = (uchar *)
				ADDR_TO_P2(port_info->rx_desc_cur->rd2);
			invalidate_cache(packet, len);
			net_process_received_packet(packet, len);
		}

		/* Make current descriptor available again */
		if (port_info->rx_desc_cur->rd0 & RD_RDLE)
			port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
		else
			port_info->rx_desc_cur->rd0 = RD_RACT;

		flush_cache_wback(port_info->rx_desc_cur,
				  sizeof(struct rx_desc_s));

		/* Point to the next descriptor */
		port_info->rx_desc_cur++;
		if (port_info->rx_desc_cur >=
		    port_info->rx_desc_base + NUM_RX_DESC)
			port_info->rx_desc_cur = port_info->rx_desc_base;
	}

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(eth, EDRRR) & EDRRR_R))
		sh_eth_write(eth, EDRRR_R, EDRRR);

	return len;
}

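/*
 * Software reset: GETHER/RZ controllers provide a self-clearing EDMR_SRST
 * bit that is polled until it drops; the other variants only need the bit
 * to be set and cleared again after a short delay.
 */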
static int sh_eth_reset(struct sh_eth_dev *eth)
{
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(eth, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(eth, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(eth, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	sh_eth_write(eth, sh_eth_read(eth, EDMR) | EDMR_SRST, EDMR);
	udelay(3000);
	sh_eth_write(eth, sh_eth_read(eth, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}

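/*
 * Descriptor ring setup: the tx and rx descriptors are allocated with
 * memalign, accessed through their P2 (non-cacheable) alias, and the last
 * entry of each ring is marked (TD_TDLE/RD_RDLE) so the controller wraps
 * back to the start of the list.
 */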
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int port = eth->port, i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback((u32)port_info->tx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((u32)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses.
	 */
	sh_eth_write(eth, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(eth, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(eth, 0x01, TDFFR); /* Last descriptor bit */
#endif

err:
	return ret;
}

static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int port = eth->port, i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((u32)port_info->rx_desc_alloc);
	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((u32)port_info->rx_buf_alloc);

	/* Initialize all descriptors */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(eth, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(eth, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(eth, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}

static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
{
	int port = eth->port;
	struct sh_eth_info *port_info = &eth->port_info[port];

	if (port_info->tx_desc_alloc) {
		free(port_info->tx_desc_alloc);
		port_info->tx_desc_alloc = NULL;
	}
}

static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
{
	int port = eth->port;
	struct sh_eth_info *port_info = &eth->port_info[port];

	if (port_info->rx_desc_alloc) {
		free(port_info->rx_desc_alloc);
		port_info->rx_desc_alloc = NULL;
	}

	if (port_info->rx_buf_alloc) {
		free(port_info->rx_buf_alloc);
		port_info->rx_buf_alloc = NULL;
	}
}

static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret = 0;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		goto err_tx_init;

	ret = sh_eth_rx_desc_init(eth);
	if (ret)
		goto err_rx_init;

	return ret;

err_rx_init:
	sh_eth_tx_desc_free(eth);

err_tx_init:
	return ret;
}

static int sh_eth_phy_config(struct sh_eth_dev *eth)
{
	int port = eth->port, ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct eth_device *dev = port_info->dev;
	struct phy_device *phydev;

	phydev = phy_connect(
			miiphy_get_dev_by_name(dev->name),
			port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
	port_info->phydev = phydev;
	phy_config(phydev);

	return ret;
}

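/*
 * Controller configuration: program the E-DMAC and E-MAC registers
 * (descriptor format, FIFO sizes and thresholds, MAC address, frame
 * length), bring up the PHY, and translate the negotiated speed and
 * duplex into the GECMR/RTRATE/ECMR settings.
 */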
static int sh_eth_config(struct sh_eth_dev *eth, bd_t *bd)
{
	int port = eth->port, ret = 0;
	u32 val;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct eth_device *dev = port_info->dev;
	struct phy_device *phy;

	/* Configure e-dmac registers */
	sh_eth_write(eth, (sh_eth_read(eth, EDMR) & ~EMDR_DESC_R) |
			(EMDR_DESC | EDMR_EL), EDMR);
	sh_eth_write(eth, 0, EESIPR);
	sh_eth_write(eth, 0, TRSCER);
	sh_eth_write(eth, 0, TFTR);
	sh_eth_write(eth, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(eth, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, 0, RPADIR);
#endif
	sh_eth_write(eth, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(eth, 0, ECSIPR);

	/* Set Mac address */
	val = dev->enetaddr[0] << 24 | dev->enetaddr[1] << 16 |
	      dev->enetaddr[2] << 8 | dev->enetaddr[3];
	sh_eth_write(eth, val, MAHR);

	val = dev->enetaddr[4] << 8 | dev->enetaddr[5];
	sh_eth_write(eth, val, MALR);

	sh_eth_write(eth, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(eth, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, APR_AP, APR);
	sh_eth_write(eth, MPR_MP, MPR);
	sh_eth_write(eth, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(eth, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_R8A7790) || defined(CONFIG_R8A7791) || \
	defined(CONFIG_R8A7793) || defined(CONFIG_R8A7794)
	sh_eth_write(eth, sh_eth_read(eth, RMIIMR) | 0x1, RMIIMR);
#endif
	/* Configure phy */
	ret = sh_eth_phy_config(eth);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_phy_cfg;
	}
	phy = port_info->phydev;
	ret = phy_startup(phy);
	if (ret) {
		printf(SHETHER_NAME ": phy startup failure\n");
		return ret;
	}

	val = 0;

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(eth, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(eth, 1, RTRATE);
#elif defined(CONFIG_CPU_SH7724) || defined(CONFIG_R8A7790) || \
		defined(CONFIG_R8A7791) || defined(CONFIG_R8A7793) || \
		defined(CONFIG_R8A7794)
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(eth, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(eth, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(eth, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(eth,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(eth,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
			     ECMR);
	}

	return ret;

err_phy_cfg:
	return ret;
}

static void sh_eth_start(struct sh_eth_dev *eth)
{
	/*
	 * Enable the e-dmac receiver only. The transmitter will be enabled
	 * when we have something to transmit.
	 */
	sh_eth_write(eth, EDRRR_R, EDRRR);
}

static void sh_eth_stop(struct sh_eth_dev *eth)
{
	sh_eth_write(eth, ~EDRRR_R, EDRRR);
}

int sh_eth_init(struct eth_device *dev, bd_t *bd)
{
	int ret = 0;
	struct sh_eth_dev *eth = dev->priv;

	ret = sh_eth_reset(eth);
	if (ret)
		goto err;

	ret = sh_eth_desc_init(eth);
	if (ret)
		goto err;

	ret = sh_eth_config(eth, bd);
	if (ret)
		goto err_config;

	sh_eth_start(eth);

	return ret;

err_config:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);

err:
	return ret;
}

void sh_eth_halt(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;

	sh_eth_stop(eth);
}

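/*
 * Top-level registration: allocate the sh_eth_dev and eth_device
 * structures, register the device with the U-Boot network core, and hook
 * the controller up to the bit-banged MDIO bus declared below so the PHY
 * can be reached through miiphy.
 */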
int sh_eth_initialize(bd_t *bd)
{
	int ret = 0;
	struct sh_eth_dev *eth = NULL;
	struct eth_device *dev = NULL;

	eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
	if (!eth) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	memset(dev, 0, sizeof(struct eth_device));
	memset(eth, 0, sizeof(struct sh_eth_dev));

	eth->port = CONFIG_SH_ETHER_USE_PORT;
	eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;

	dev->priv = (void *)eth;
	dev->iobase = 0;
	dev->init = sh_eth_init;
	dev->halt = sh_eth_halt;
	dev->send = sh_eth_send;
	dev->recv = sh_eth_recv;
	eth->port_info[eth->port].dev = dev;

	strcpy(dev->name, SHETHER_NAME);

	/* Register Device to EtherNet subsystem */
	eth_register(dev);

	bb_miiphy_buses[0].priv = eth;
	miiphy_register(dev->name, bb_miiphy_read, bb_miiphy_write);

	if (!eth_getenv_enetaddr("ethaddr", dev->enetaddr))
		puts("Please set MAC address\n");

	return ret;

err:
	if (dev)
		free(dev);

	if (eth)
		free(eth);

	printf(SHETHER_NAME ": Failed\n");
	return ret;
}

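/*
 * Bit-banged MDIO callbacks for the generic bb_miiphy layer: the MII
 * management lines are driven by toggling the MMD, MDO and MDC bits of
 * the PIR register, and the MDI bit is read back for incoming data.
 */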
/******* for bb_miiphy *******/
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;

	sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;

	sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;

	if (v)
		sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MDO, PIR);
	else
		sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MDO, PIR);

	return 0;
}

static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct sh_eth_dev *eth = bus->priv;

	*v = (sh_eth_read(eth, PIR) & PIR_MDI) >> 3;

	return 0;
}

static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;

	if (v)
		sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MDC, PIR);
	else
		sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MDC, PIR);

	return 0;
}

static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "sh_eth",
		.init		= sh_eth_bb_init,
		.mdio_active	= sh_eth_bb_mdio_active,
		.mdio_tristate	= sh_eth_bb_mdio_tristate,
		.set_mdio	= sh_eth_bb_set_mdio,
		.get_mdio	= sh_eth_bb_get_mdio,
		.set_mdc	= sh_eth_bb_set_mdc,
		.delay		= sh_eth_bb_delay,
	}
};
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);