cpsw.c

/*
 * CPSW Ethernet Switch Driver
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <common.h>
#include <command.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <netdev.h>
#include <cpsw.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <phy.h>
#include <asm/arch/cpu.h>
#include <dm.h>

DECLARE_GLOBAL_DATA_PTR;
#define BITMASK(bits) (BIT(bits) - 1)
#define PHY_REG_MASK 0x1f
#define PHY_ID_MASK 0x1f
#define NUM_DESCS (PKTBUFSRX * 2)
#define PKT_MIN 60
#define PKT_MAX (1500 + 14 + 4 + 4)
#define CLEAR_BIT 1
#define GIGABITEN BIT(7)
#define FULLDUPLEXEN BIT(0)
#define MIIEN BIT(15)

/* reg offset */
#define CPSW_HOST_PORT_OFFSET 0x108
#define CPSW_SLAVE0_OFFSET 0x208
#define CPSW_SLAVE1_OFFSET 0x308
#define CPSW_SLAVE_SIZE 0x100
#define CPSW_CPDMA_OFFSET 0x800
#define CPSW_HW_STATS 0x900
#define CPSW_STATERAM_OFFSET 0xa00
#define CPSW_CPTS_OFFSET 0xc00
#define CPSW_ALE_OFFSET 0xd00
#define CPSW_SLIVER0_OFFSET 0xd80
#define CPSW_SLIVER1_OFFSET 0xdc0
#define CPSW_BD_OFFSET 0x2000
#define CPSW_MDIO_DIV 0xff
#define AM335X_GMII_SEL_OFFSET 0x630

/* DMA Registers */
#define CPDMA_TXCONTROL 0x004
#define CPDMA_RXCONTROL 0x014
#define CPDMA_SOFTRESET 0x01c
#define CPDMA_RXFREE 0x0e0
#define CPDMA_TXHDP_VER1 0x100
#define CPDMA_TXHDP_VER2 0x200
#define CPDMA_RXHDP_VER1 0x120
#define CPDMA_RXHDP_VER2 0x220
#define CPDMA_TXCP_VER1 0x140
#define CPDMA_TXCP_VER2 0x240
#define CPDMA_RXCP_VER1 0x160
#define CPDMA_RXCP_VER2 0x260

/* Descriptor mode bits */
#define CPDMA_DESC_SOP BIT(31)
#define CPDMA_DESC_EOP BIT(30)
#define CPDMA_DESC_OWNER BIT(29)
#define CPDMA_DESC_EOQ BIT(28)

/*
 * This timeout definition is a worst-case ultra defensive measure against
 * unexpected controller lock ups. Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT 100 /* msecs */
#define CPDMA_TIMEOUT 100 /* msecs */
struct cpsw_mdio_regs {
        u32 version;
        u32 control;
#define CONTROL_IDLE BIT(31)
#define CONTROL_ENABLE BIT(30)
        u32 alive;
        u32 link;
        u32 linkintraw;
        u32 linkintmasked;
        u32 __reserved_0[2];
        u32 userintraw;
        u32 userintmasked;
        u32 userintmaskset;
        u32 userintmaskclr;
        u32 __reserved_1[20];
        struct {
                u32 access;
                u32 physel;
#define USERACCESS_GO BIT(31)
#define USERACCESS_WRITE BIT(30)
#define USERACCESS_ACK BIT(29)
#define USERACCESS_READ (0)
#define USERACCESS_DATA (0xffff)
        } user[0];
};
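
/*
 * Note on the USERACCESS register layout used below (derived from the bit
 * definitions above and the MDIO read/write helpers further down): bit 31
 * is the GO/busy flag, bit 30 selects a write cycle, bit 29 reports ACK on
 * reads, the PHY register number sits at bits 25:21, the PHY address at
 * bits 20:16, and the 16-bit data value occupies bits 15:0.
 */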

struct cpsw_regs {
        u32 id_ver;
        u32 control;
        u32 soft_reset;
        u32 stat_port_en;
        u32 ptype;
};

struct cpsw_slave_regs {
        u32 max_blks;
        u32 blk_cnt;
        u32 flow_thresh;
        u32 port_vlan;
        u32 tx_pri_map;
#ifdef CONFIG_AM33XX
        u32 gap_thresh;
#elif defined(CONFIG_TI814X)
        u32 ts_ctl;
        u32 ts_seq_ltype;
        u32 ts_vlan;
#endif
        u32 sa_lo;
        u32 sa_hi;
};

struct cpsw_host_regs {
        u32 max_blks;
        u32 blk_cnt;
        u32 flow_thresh;
        u32 port_vlan;
        u32 tx_pri_map;
        u32 cpdma_tx_pri_map;
        u32 cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
        u32 id_ver;
        u32 mac_control;
        u32 mac_status;
        u32 soft_reset;
        u32 rx_maxlen;
        u32 __reserved_0;
        u32 rx_pause;
        u32 tx_pause;
        u32 __reserved_1;
        u32 rx_pri_map;
};

#define ALE_ENTRY_BITS 68
#define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)

/* ALE Registers */
#define ALE_CONTROL 0x08
#define ALE_UNKNOWNVLAN 0x18
#define ALE_TABLE_CONTROL 0x20
#define ALE_TABLE 0x34
#define ALE_PORTCTL 0x40

#define ALE_TABLE_WRITE BIT(31)

#define ALE_TYPE_FREE 0
#define ALE_TYPE_ADDR 1
#define ALE_TYPE_VLAN 2
#define ALE_TYPE_VLAN_ADDR 3

#define ALE_UCAST_PERSISTANT 0
#define ALE_UCAST_UNTOUCHED 1
#define ALE_UCAST_OUI 2
#define ALE_UCAST_TOUCHED 3

#define ALE_MCAST_FWD 0
#define ALE_MCAST_BLOCK_LEARN_FWD 1
#define ALE_MCAST_FWD_LEARN 2
#define ALE_MCAST_FWD_2 3

enum cpsw_ale_port_state {
        ALE_PORT_STATE_DISABLE = 0x00,
        ALE_PORT_STATE_BLOCK = 0x01,
        ALE_PORT_STATE_LEARN = 0x02,
        ALE_PORT_STATE_FORWARD = 0x03,
};

/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
#define ALE_SECURE 1
#define ALE_BLOCKED 2

struct cpsw_slave {
        struct cpsw_slave_regs *regs;
        struct cpsw_sliver_regs *sliver;
        int slave_num;
        u32 mac_control;
        struct cpsw_slave_data *data;
};

struct cpdma_desc {
        /* hardware fields */
        u32 hw_next;
        u32 hw_buffer;
        u32 hw_len;
        u32 hw_mode;
        /* software fields */
        u32 sw_buffer;
        u32 sw_len;
};

struct cpdma_chan {
        struct cpdma_desc *head, *tail;
        void *hdp, *cp, *rxfree;
};

#define desc_write(desc, fld, val) __raw_writel((u32)(val), &(desc)->fld)
#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
#define desc_read_ptr(desc, fld) ((void *)__raw_readl(&(desc)->fld))

#define chan_write(chan, fld, val) __raw_writel((u32)(val), (chan)->fld)
#define chan_read(chan, fld) __raw_readl((chan)->fld)
#define chan_read_ptr(chan, fld) ((void *)__raw_readl((chan)->fld))
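
/*
 * for_active_slave() is not a loop despite its name: it expands to a plain
 * assignment that points 'slave' at the single slave selected by
 * data.active_slave, followed by an if() that guards the statement placed
 * after the macro. for_each_slave() is a real loop over all slaves.
 */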
#define for_active_slave(slave, priv) \
        slave = (priv)->slaves + (priv)->data.active_slave; if (slave)
#define for_each_slave(slave, priv) \
        for (slave = (priv)->slaves; slave != (priv)->slaves + \
                     (priv)->data.slaves; slave++)

struct cpsw_priv {
#ifdef CONFIG_DM_ETH
        struct udevice *dev;
#else
        struct eth_device *dev;
#endif
        struct cpsw_platform_data data;
        int host_port;

        struct cpsw_regs *regs;
        void *dma_regs;
        struct cpsw_host_regs *host_port_regs;
        void *ale_regs;

        struct cpdma_desc *descs;
        struct cpdma_desc *desc_free;
        struct cpdma_chan rx_chan, tx_chan;

        struct cpsw_slave *slaves;
        struct phy_device *phydev;
        struct mii_dev *bus;

        u32 phy_mask;
};

static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
        int idx;

        idx = start / 32;
        start -= idx * 32;
        idx = 2 - idx; /* flip */
        return (ale_entry[idx] >> start) & BITMASK(bits);
}

static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
                                      u32 value)
{
        int idx;

        value &= BITMASK(bits);
        idx = start / 32;
        start -= idx * 32;
        idx = 2 - idx; /* flip */
        ale_entry[idx] &= ~(BITMASK(bits) << start);
        ale_entry[idx] |= (value << start);
}

#define DEFINE_ALE_FIELD(name, start, bits) \
static inline int cpsw_ale_get_##name(u32 *ale_entry) \
{ \
        return cpsw_ale_get_field(ale_entry, start, bits); \
} \
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
{ \
        cpsw_ale_set_field(ale_entry, start, bits, value); \
}

DEFINE_ALE_FIELD(entry_type, 60, 2)
DEFINE_ALE_FIELD(mcast_state, 62, 2)
DEFINE_ALE_FIELD(port_mask, 66, 3)
DEFINE_ALE_FIELD(ucast_type, 62, 2)
DEFINE_ALE_FIELD(port_num, 66, 2)
DEFINE_ALE_FIELD(blocked, 65, 1)
DEFINE_ALE_FIELD(secure, 64, 1)
DEFINE_ALE_FIELD(mcast, 40, 1)
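
/*
 * Each ALE table entry is 68 bits wide (ALE_ENTRY_BITS), stored in three
 * 32-bit words with word 0 holding the most significant bits; the "flip"
 * in cpsw_ale_get_field()/cpsw_ale_set_field() converts a bit offset into
 * that reversed word order. For example, entry_type (offset 60, 2 bits)
 * maps to word index 2 - (60 / 32) = 1 and shift 60 - 32 = 28.
 */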

/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
        int i;

        for (i = 0; i < 6; i++)
                addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
}

static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
{
        int i;

        for (i = 0; i < 6; i++)
                cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
}

static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
        int i;

        __raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);

        for (i = 0; i < ALE_ENTRY_WORDS; i++)
                ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);

        return idx;
}

static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
        int i;

        for (i = 0; i < ALE_ENTRY_WORDS; i++)
                __raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);

        __raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);

        return idx;
}

static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr)
{
        u32 ale_entry[ALE_ENTRY_WORDS];
        int type, idx;

        for (idx = 0; idx < priv->data.ale_entries; idx++) {
                u8 entry_addr[6];

                cpsw_ale_read(priv, idx, ale_entry);
                type = cpsw_ale_get_entry_type(ale_entry);
                if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
                        continue;
                cpsw_ale_get_addr(ale_entry, entry_addr);
                if (memcmp(entry_addr, addr, 6) == 0)
                        return idx;
        }
        return -ENOENT;
}

static int cpsw_ale_match_free(struct cpsw_priv *priv)
{
        u32 ale_entry[ALE_ENTRY_WORDS];
        int type, idx;

        for (idx = 0; idx < priv->data.ale_entries; idx++) {
                cpsw_ale_read(priv, idx, ale_entry);
                type = cpsw_ale_get_entry_type(ale_entry);
                if (type == ALE_TYPE_FREE)
                        return idx;
        }
        return -ENOENT;
}

static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
{
        u32 ale_entry[ALE_ENTRY_WORDS];
        int type, idx;

        for (idx = 0; idx < priv->data.ale_entries; idx++) {
                cpsw_ale_read(priv, idx, ale_entry);
                type = cpsw_ale_get_entry_type(ale_entry);
                if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
                        continue;
                if (cpsw_ale_get_mcast(ale_entry))
                        continue;
                type = cpsw_ale_get_ucast_type(ale_entry);
                if (type != ALE_UCAST_PERSISTANT &&
                    type != ALE_UCAST_OUI)
                        return idx;
        }
        return -ENOENT;
}

static int cpsw_ale_add_ucast(struct cpsw_priv *priv, const u8 *addr,
                              int port, int flags)
{
        u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
        int idx;

        cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
        cpsw_ale_set_addr(ale_entry, addr);
        cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
        cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
        cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
        cpsw_ale_set_port_num(ale_entry, port);

        idx = cpsw_ale_match_addr(priv, addr);
        if (idx < 0)
                idx = cpsw_ale_match_free(priv);
        if (idx < 0)
                idx = cpsw_ale_find_ageable(priv);
        if (idx < 0)
                return -ENOMEM;

        cpsw_ale_write(priv, idx, ale_entry);
        return 0;
}

static int cpsw_ale_add_mcast(struct cpsw_priv *priv, const u8 *addr,
                              int port_mask)
{
        u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
        int idx, mask;

        idx = cpsw_ale_match_addr(priv, addr);
        if (idx >= 0)
                cpsw_ale_read(priv, idx, ale_entry);

        cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
        cpsw_ale_set_addr(ale_entry, addr);
        cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);

        mask = cpsw_ale_get_port_mask(ale_entry);
        port_mask |= mask;
        cpsw_ale_set_port_mask(ale_entry, port_mask);

        if (idx < 0)
                idx = cpsw_ale_match_free(priv);
        if (idx < 0)
                idx = cpsw_ale_find_ageable(priv);
        if (idx < 0)
                return -ENOMEM;

        cpsw_ale_write(priv, idx, ale_entry);
        return 0;
}

static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
{
        u32 tmp, mask = BIT(bit);

        tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
        tmp &= ~mask;
        tmp |= val ? mask : 0;
        __raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
}

#define cpsw_ale_enable(priv, val) cpsw_ale_control(priv, 31, val)
#define cpsw_ale_clear(priv, val) cpsw_ale_control(priv, 30, val)
#define cpsw_ale_vlan_aware(priv, val) cpsw_ale_control(priv, 2, val)

static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
                                       int val)
{
        int offset = ALE_PORTCTL + 4 * port;
        u32 tmp, mask = 0x3;

        tmp = __raw_readl(priv->ale_regs + offset);
        tmp &= ~mask;
        tmp |= val & mask;
        __raw_writel(tmp, priv->ale_regs + offset);
}

static struct cpsw_mdio_regs *mdio_regs;

/* wait until hardware is ready for another user access */
static inline u32 wait_for_user_access(void)
{
        u32 reg = 0;
        int timeout = MDIO_TIMEOUT;

        while (timeout-- &&
               ((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
                udelay(10);

        if (timeout == -1) {
                printf("wait_for_user_access Timeout\n");
                return -ETIMEDOUT;
        }
        return reg;
}

/* wait until hardware state machine is idle */
static inline void wait_for_idle(void)
{
        int timeout = MDIO_TIMEOUT;

        while (timeout-- &&
               ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
                udelay(10);

        if (timeout == -1)
                printf("wait_for_idle Timeout\n");
}

static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
                          int dev_addr, int phy_reg)
{
        int data;
        u32 reg;

        if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
                return -EINVAL;

        wait_for_user_access();
        reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
               (phy_id << 16));
        __raw_writel(reg, &mdio_regs->user[0].access);
        reg = wait_for_user_access();

        data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
        return data;
}

static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
                           int phy_reg, u16 data)
{
        u32 reg;

        if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
                return -EINVAL;

        wait_for_user_access();
        reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
               (phy_id << 16) | (data & USERACCESS_DATA));
        __raw_writel(reg, &mdio_regs->user[0].access);
        wait_for_user_access();

        return 0;
}

static void cpsw_mdio_init(const char *name, u32 mdio_base, u32 div)
{
        struct mii_dev *bus = mdio_alloc();

        mdio_regs = (struct cpsw_mdio_regs *)mdio_base;

        /* set enable and clock divider */
        __raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);

        /*
         * wait for scan logic to settle:
         * the scan time consists of (a) a large fixed component, and (b) a
         * small component that varies with the mii bus frequency. These
         * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
         * silicon. Since the effect of (b) was found to be largely
         * negligible, we keep things simple here.
         */
        udelay(1000);

        bus->read = cpsw_mdio_read;
        bus->write = cpsw_mdio_write;
        strcpy(bus->name, name);

        mdio_register(bus);
}
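
/*
 * Illustrative sketch (not part of the original driver): once
 * cpsw_mdio_init() has registered the bus, PHY registers can be reached
 * through U-Boot's generic miiphy layer, e.g. something along the lines of
 *
 *	unsigned short val;
 *	if (!miiphy_read("cpsw", phy_addr, MII_BMSR, &val))
 *		printf("BMSR = 0x%04x\n", val);
 *
 * or interactively via the "mii" shell command; phy_addr here is a
 * hypothetical PHY address taken from the board's slave configuration.
 */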

/* Set a self-clearing bit in a register, and wait for it to clear */
static inline void setbit_and_wait_for_clear32(void *addr)
{
        __raw_writel(CLEAR_BIT, addr);
        while (__raw_readl(addr) & CLEAR_BIT)
                ;
}

#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
                     ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
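
/*
 * Worked example: for the MAC address 00:11:22:33:44:55, mac_hi() yields
 * 0x33221100 and mac_lo() yields 0x5544, which are the values written into
 * the sa_hi/sa_lo slave registers by cpsw_set_slave_mac() below.
 */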

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
                               struct cpsw_priv *priv)
{
#ifdef CONFIG_DM_ETH
        struct eth_pdata *pdata = dev_get_platdata(priv->dev);

        writel(mac_hi(pdata->enetaddr), &slave->regs->sa_hi);
        writel(mac_lo(pdata->enetaddr), &slave->regs->sa_lo);
#else
        __raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
        __raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
#endif
}

static void cpsw_slave_update_link(struct cpsw_slave *slave,
                                   struct cpsw_priv *priv, int *link)
{
        struct phy_device *phy;
        u32 mac_control = 0;

        phy = priv->phydev;

        if (!phy)
                return;

        phy_startup(phy);
        *link = phy->link;

        if (*link) { /* link up */
                mac_control = priv->data.mac_control;
                if (phy->speed == 1000)
                        mac_control |= GIGABITEN;
                if (phy->duplex == DUPLEX_FULL)
                        mac_control |= FULLDUPLEXEN;
                if (phy->speed == 100)
                        mac_control |= MIIEN;
        }

        if (mac_control == slave->mac_control)
                return;

        if (mac_control) {
                printf("link up on port %d, speed %d, %s duplex\n",
                       slave->slave_num, phy->speed,
                       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
        } else {
                printf("link down on port %d\n", slave->slave_num);
        }

        __raw_writel(mac_control, &slave->sliver->mac_control);
        slave->mac_control = mac_control;
}

static int cpsw_update_link(struct cpsw_priv *priv)
{
        int link = 0;
        struct cpsw_slave *slave;

        for_active_slave(slave, priv)
                cpsw_slave_update_link(slave, priv, &link);

        return link;
}

static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
        if (priv->host_port == 0)
                return slave_num + 1;
        else
                return slave_num;
}

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
        u32 slave_port;

        setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

        /* setup priority mapping */
        __raw_writel(0x76543210, &slave->sliver->rx_pri_map);
        __raw_writel(0x33221100, &slave->regs->tx_pri_map);

        /* setup max packet size, and mac address */
        __raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
        cpsw_set_slave_mac(slave, priv);

        slave->mac_control = 0; /* no link yet */

        /* enable forwarding */
        slave_port = cpsw_get_slave_port(priv, slave->slave_num);
        cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

        cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << slave_port);

        priv->phy_mask |= 1 << slave->data->phy_addr;
}

static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
{
        struct cpdma_desc *desc = priv->desc_free;

        if (desc)
                priv->desc_free = desc_read_ptr(desc, hw_next);
        return desc;
}

static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
{
        if (desc) {
                desc_write(desc, hw_next, priv->desc_free);
                priv->desc_free = desc;
        }
}

static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
                        void *buffer, int len)
{
        struct cpdma_desc *desc, *prev;
        u32 mode;

        desc = cpdma_desc_alloc(priv);
        if (!desc)
                return -ENOMEM;

        if (len < PKT_MIN)
                len = PKT_MIN;

        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

        desc_write(desc, hw_next, 0);
        desc_write(desc, hw_buffer, buffer);
        desc_write(desc, hw_len, len);
        desc_write(desc, hw_mode, mode | len);
        desc_write(desc, sw_buffer, buffer);
        desc_write(desc, sw_len, len);

        if (!chan->head) {
                /* simple case - first packet enqueued */
                chan->head = desc;
                chan->tail = desc;
                chan_write(chan, hdp, desc);
                goto done;
        }

        /* not the first packet - enqueue at the tail */
        prev = chan->tail;
        desc_write(prev, hw_next, desc);
        chan->tail = desc;

        /* next check if EOQ has been triggered already */
        if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
                chan_write(chan, hdp, desc);

done:
        if (chan->rxfree)
                chan_write(chan, rxfree, 1);
        return 0;
}
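
/*
 * cpdma_process() pops the completed descriptor at the head of the channel,
 * if any: it returns -ENOENT when the queue is empty, -EBUSY when the head
 * descriptor is still owned by the hardware (re-arming the head descriptor
 * pointer if the queue has stalled), and 0 after handing the buffer and
 * length back to the caller and recycling the descriptor.
 */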
static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
                         void **buffer, int *len)
{
        struct cpdma_desc *desc = chan->head;
        u32 status;

        if (!desc)
                return -ENOENT;

        status = desc_read(desc, hw_mode);

        if (len)
                *len = status & 0x7ff;

        if (buffer)
                *buffer = desc_read_ptr(desc, sw_buffer);

        if (status & CPDMA_DESC_OWNER) {
                if (chan_read(chan, hdp) == 0) {
                        if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
                                chan_write(chan, hdp, desc);
                }

                return -EBUSY;
        }

        chan->head = desc_read_ptr(desc, hw_next);
        chan_write(chan, cp, desc);

        cpdma_desc_free(priv, desc);
        return 0;
}

static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr)
{
        struct cpsw_slave *slave;
        int i, ret;

        /* soft reset the controller and initialize priv */
        setbit_and_wait_for_clear32(&priv->regs->soft_reset);

        /* initialize and reset the address lookup engine */
        cpsw_ale_enable(priv, 1);
        cpsw_ale_clear(priv, 1);
        cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */

        /* setup host port priority mapping */
        __raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
        __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

        /* disable priority elevation and enable statistics on all ports */
        __raw_writel(0, &priv->regs->ptype);

        /* enable statistics collection on all ports */
        __raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
        __raw_writel(0x7, &priv->regs->stat_port_en);

        cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);

        cpsw_ale_add_ucast(priv, enetaddr, priv->host_port, ALE_SECURE);
        cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << priv->host_port);

        for_active_slave(slave, priv)
                cpsw_slave_init(slave, priv);

        cpsw_update_link(priv);

        /* init descriptor pool */
        for (i = 0; i < NUM_DESCS; i++) {
                desc_write(&priv->descs[i], hw_next,
                           (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
        }
        priv->desc_free = &priv->descs[0];

        /* initialize channels */
        if (priv->data.version == CPSW_CTRL_VERSION_2) {
                memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
                priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
                priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
                priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

                memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
                priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
                priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
        } else {
                memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
                priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
                priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
                priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

                memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
                priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
                priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
        }

        /* clear dma state */
        setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

        if (priv->data.version == CPSW_CTRL_VERSION_2) {
                for (i = 0; i < priv->data.channels; i++) {
                        __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 +
                                     4 * i);
                        __raw_writel(0, priv->dma_regs + CPDMA_RXFREE +
                                     4 * i);
                        __raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 +
                                     4 * i);
                        __raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 +
                                     4 * i);
                        __raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 +
                                     4 * i);
                }
        } else {
                for (i = 0; i < priv->data.channels; i++) {
                        __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 +
                                     4 * i);
                        __raw_writel(0, priv->dma_regs + CPDMA_RXFREE +
                                     4 * i);
                        __raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 +
                                     4 * i);
                        __raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 +
                                     4 * i);
                        __raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 +
                                     4 * i);
                }
        }

        __raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
        __raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);

        /* submit rx descs */
        for (i = 0; i < PKTBUFSRX; i++) {
                ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
                                   PKTSIZE);
                if (ret < 0) {
                        printf("error %d submitting rx desc\n", ret);
                        break;
                }
        }

        return 0;
}

static void _cpsw_halt(struct cpsw_priv *priv)
{
        writel(0, priv->dma_regs + CPDMA_TXCONTROL);
        writel(0, priv->dma_regs + CPDMA_RXCONTROL);

        /* soft reset the controller and initialize priv */
        setbit_and_wait_for_clear32(&priv->regs->soft_reset);

        /* clear dma state */
        setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
}

static int _cpsw_send(struct cpsw_priv *priv, void *packet, int length)
{
        void *buffer;
        int len;
        int timeout = CPDMA_TIMEOUT;

        flush_dcache_range((unsigned long)packet,
                           (unsigned long)packet + length);

        /* first reap completed packets */
        while (timeout-- &&
               (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
                ;

        if (timeout == -1) {
                printf("cpdma_process timeout\n");
                return -ETIMEDOUT;
        }

        return cpdma_submit(priv, &priv->tx_chan, packet, length);
}

static int _cpsw_recv(struct cpsw_priv *priv, uchar **pkt)
{
        void *buffer;
        int len;
        int ret = -EAGAIN;

        ret = cpdma_process(priv, &priv->rx_chan, &buffer, &len);
        if (ret < 0)
                return ret;

        invalidate_dcache_range((unsigned long)buffer,
                                (unsigned long)buffer + PKTSIZE_ALIGN);
        *pkt = buffer;

        return len;
}

static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
                             struct cpsw_priv *priv)
{
        void *regs = priv->regs;
        struct cpsw_slave_data *data = priv->data.slave_data + slave_num;

        slave->slave_num = slave_num;
        slave->data = data;
        slave->regs = regs + data->slave_reg_ofs;
        slave->sliver = regs + data->sliver_reg_ofs;
}

static int cpsw_phy_init(struct cpsw_priv *priv, struct cpsw_slave *slave)
{
        struct phy_device *phydev;
        u32 supported = PHY_GBIT_FEATURES;

        phydev = phy_connect(priv->bus,
                             slave->data->phy_addr,
                             priv->dev,
                             slave->data->phy_if);
        if (!phydev)
                return -1;

        phydev->supported &= supported;
        phydev->advertising = phydev->supported;

        priv->phydev = phydev;
        phy_config(phydev);

        return 1;
}

int _cpsw_register(struct cpsw_priv *priv)
{
        struct cpsw_slave *slave;
        struct cpsw_platform_data *data = &priv->data;
        void *regs = (void *)data->cpsw_base;
        int idx = 0;

        priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
        if (!priv->slaves)
                return -ENOMEM;

        priv->host_port = data->host_port_num;
        priv->regs = regs;
        priv->host_port_regs = regs + data->host_port_reg_ofs;
        priv->dma_regs = regs + data->cpdma_reg_ofs;
        priv->ale_regs = regs + data->ale_reg_ofs;
        priv->descs = (void *)regs + data->bd_ram_ofs;

        for_each_slave(slave, priv) {
                cpsw_slave_setup(slave, idx, priv);
                idx = idx + 1;
        }

        cpsw_mdio_init(priv->dev->name, data->mdio_base, data->mdio_div);
        priv->bus = miiphy_get_dev_by_name(priv->dev->name);
        for_active_slave(slave, priv)
                cpsw_phy_init(priv, slave);

        return 0;
}

#ifndef CONFIG_DM_ETH
static int cpsw_init(struct eth_device *dev, bd_t *bis)
{
        struct cpsw_priv *priv = dev->priv;

        return _cpsw_init(priv, dev->enetaddr);
}

static void cpsw_halt(struct eth_device *dev)
{
        struct cpsw_priv *priv = dev->priv;

        return _cpsw_halt(priv);
}

static int cpsw_send(struct eth_device *dev, void *packet, int length)
{
        struct cpsw_priv *priv = dev->priv;

        return _cpsw_send(priv, packet, length);
}

static int cpsw_recv(struct eth_device *dev)
{
        struct cpsw_priv *priv = dev->priv;
        uchar *pkt = NULL;
        int len;

        len = _cpsw_recv(priv, &pkt);
        if (len > 0) {
                net_process_received_packet(pkt, len);
                cpdma_submit(priv, &priv->rx_chan, pkt, PKTSIZE);
        }

        return len;
}

int cpsw_register(struct cpsw_platform_data *data)
{
        struct cpsw_priv *priv;
        struct eth_device *dev;
        int ret;

        dev = calloc(sizeof(*dev), 1);
        if (!dev)
                return -ENOMEM;

        priv = calloc(sizeof(*priv), 1);
        if (!priv) {
                free(dev);
                return -ENOMEM;
        }

        priv->dev = dev;
        priv->data = *data;

        strcpy(dev->name, "cpsw");
        dev->iobase = 0;
        dev->init = cpsw_init;
        dev->halt = cpsw_halt;
        dev->send = cpsw_send;
        dev->recv = cpsw_recv;
        dev->priv = priv;

        eth_register(dev);

        ret = _cpsw_register(priv);
        if (ret < 0) {
                eth_unregister(dev);
                free(dev);
                free(priv);
                return ret;
        }

        return 1;
}
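
/*
 * Illustrative sketch (not taken from any particular board file): in the
 * non-DM_ETH configuration a board registers the switch by filling in a
 * struct cpsw_platform_data and calling cpsw_register(), roughly along
 * these lines; all values below are placeholders, not real board settings.
 *
 *	static struct cpsw_slave_data board_slaves[] = {
 *		{
 *			.slave_reg_ofs	= CPSW_SLAVE0_OFFSET,
 *			.sliver_reg_ofs	= CPSW_SLIVER0_OFFSET,
 *			.phy_addr	= 0,
 *		},
 *	};
 *
 *	static struct cpsw_platform_data board_cpsw_data = {
 *		.mdio_base		= <MDIO base address>,
 *		.cpsw_base		= <CPSW base address>,
 *		.mdio_div		= CPSW_MDIO_DIV,
 *		.channels		= 8,
 *		.cpdma_reg_ofs		= CPSW_CPDMA_OFFSET,
 *		.slaves			= 1,
 *		.slave_data		= board_slaves,
 *		.active_slave		= 0,
 *		.ale_reg_ofs		= CPSW_ALE_OFFSET,
 *		.ale_entries		= 1024,
 *		.host_port_reg_ofs	= CPSW_HOST_PORT_OFFSET,
 *		.bd_ram_ofs		= CPSW_BD_OFFSET,
 *		.mac_control		= <MAC control value>,
 *		.version		= CPSW_CTRL_VERSION_2,
 *	};
 *
 *	cpsw_register(&board_cpsw_data);
 */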
#else
static int cpsw_eth_start(struct udevice *dev)
{
        struct eth_pdata *pdata = dev_get_platdata(dev);
        struct cpsw_priv *priv = dev_get_priv(dev);

        return _cpsw_init(priv, pdata->enetaddr);
}

static int cpsw_eth_send(struct udevice *dev, void *packet, int length)
{
        struct cpsw_priv *priv = dev_get_priv(dev);

        return _cpsw_send(priv, packet, length);
}

static int cpsw_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
        struct cpsw_priv *priv = dev_get_priv(dev);

        return _cpsw_recv(priv, packetp);
}

static int cpsw_eth_free_pkt(struct udevice *dev, uchar *packet,
                             int length)
{
        struct cpsw_priv *priv = dev_get_priv(dev);

        return cpdma_submit(priv, &priv->rx_chan, packet, PKTSIZE);
}

static void cpsw_eth_stop(struct udevice *dev)
{
        struct cpsw_priv *priv = dev_get_priv(dev);

        return _cpsw_halt(priv);
}

static int cpsw_eth_probe(struct udevice *dev)
{
        struct cpsw_priv *priv = dev_get_priv(dev);

        priv->dev = dev;

        return _cpsw_register(priv);
}

static const struct eth_ops cpsw_eth_ops = {
        .start = cpsw_eth_start,
        .send = cpsw_eth_send,
        .recv = cpsw_eth_recv,
        .free_pkt = cpsw_eth_free_pkt,
        .stop = cpsw_eth_stop,
};
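
/*
 * Illustrative sketch of the device-tree layout that
 * cpsw_eth_ofdata_to_platdata() below expects. The property names match
 * the fdtdec lookups in the function; the node names only need to start
 * with "mdio", "slave" and "cpsw-phy-sel"; all addresses and values here
 * are placeholders.
 *
 *	mac: ethernet@0 {
 *		compatible = "ti,am335x-cpsw";
 *		cpdma_channels = <8>;
 *		ale_entries = <1024>;
 *		bd_ram_size = <0x2000>;
 *		mac_control = <0x20>;
 *		slaves = <2>;
 *		active_slave = <0>;
 *		syscon = <&scm_conf>;
 *
 *		davinci_mdio: mdio@1000 {
 *			reg = <...>;
 *		};
 *
 *		cpsw_emac0: slave@200 {
 *			phy_id = <&davinci_mdio>, <0>;
 *			phy-mode = "rgmii";
 *		};
 *
 *		phy_sel: cpsw-phy-sel@650 {
 *			reg = <...>;
 *		};
 *	};
 */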
static int cpsw_eth_ofdata_to_platdata(struct udevice *dev)
{
        struct eth_pdata *pdata = dev_get_platdata(dev);
        struct cpsw_priv *priv = dev_get_priv(dev);
        const char *phy_mode;
        const void *fdt = gd->fdt_blob;
        int node = dev->of_offset;
        int subnode;
        int slave_index = 0;
        uint32_t mac_hi, mac_lo;
        fdt32_t gmii = 0;
        int active_slave;

        pdata->iobase = dev_get_addr(dev);
        priv->data.version = CPSW_CTRL_VERSION_2;
        priv->data.bd_ram_ofs = CPSW_BD_OFFSET;
        priv->data.ale_reg_ofs = CPSW_ALE_OFFSET;
        priv->data.cpdma_reg_ofs = CPSW_CPDMA_OFFSET;
        priv->data.mdio_div = CPSW_MDIO_DIV;
        priv->data.host_port_reg_ofs = CPSW_HOST_PORT_OFFSET;

        pdata->phy_interface = -1;

        priv->data.cpsw_base = pdata->iobase;

        priv->data.channels = fdtdec_get_int(fdt, node, "cpdma_channels", -1);
        if (priv->data.channels <= 0) {
                printf("error: cpdma_channels not found in dt\n");
                return -ENOENT;
        }

        priv->data.slaves = fdtdec_get_int(fdt, node, "slaves", -1);
        if (priv->data.slaves <= 0) {
                printf("error: slaves not found in dt\n");
                return -ENOENT;
        }
        priv->data.slave_data = malloc(sizeof(struct cpsw_slave_data) *
                                       priv->data.slaves);

        priv->data.ale_entries = fdtdec_get_int(fdt, node, "ale_entries", -1);
        if (priv->data.ale_entries <= 0) {
                printf("error: ale_entries not found in dt\n");
                return -ENOENT;
        }

        priv->data.bd_ram_ofs = fdtdec_get_int(fdt, node, "bd_ram_size", -1);
        if (priv->data.bd_ram_ofs <= 0) {
                printf("error: bd_ram_size not found in dt\n");
                return -ENOENT;
        }

        priv->data.mac_control = fdtdec_get_int(fdt, node, "mac_control", -1);
        if (priv->data.mac_control <= 0) {
                printf("error: mac_control not found in dt\n");
                return -ENOENT;
        }

        active_slave = fdtdec_get_int(fdt, node, "active_slave", 0);
        priv->data.active_slave = active_slave;

        fdt_for_each_subnode(fdt, subnode, node) {
                int len;
                const char *name;

                name = fdt_get_name(fdt, subnode, &len);

                if (!strncmp(name, "mdio", 4)) {
                        priv->data.mdio_base = fdtdec_get_addr(fdt, subnode,
                                                               "reg");
                }

                if (!strncmp(name, "slave", 5)) {
                        u32 phy_id[2];

                        if (slave_index >= priv->data.slaves) {
                                printf("error: num slaves and slave nodes did not match\n");
                                return -EINVAL;
                        }
                        phy_mode = fdt_getprop(fdt, subnode, "phy-mode", NULL);
                        if (phy_mode)
                                priv->data.slave_data[slave_index].phy_if =
                                        phy_get_interface_by_name(phy_mode);
                        fdtdec_get_int_array(fdt, subnode, "phy_id", phy_id, 2);
                        priv->data.slave_data[slave_index].phy_addr = phy_id[1];
                        slave_index++;
                }

                if (!strncmp(name, "cpsw-phy-sel", 12)) {
                        priv->data.gmii_sel = fdtdec_get_addr(fdt, subnode,
                                                              "reg");
                }
        }

        priv->data.slave_data[0].slave_reg_ofs = CPSW_SLAVE0_OFFSET;
        priv->data.slave_data[0].sliver_reg_ofs = CPSW_SLIVER0_OFFSET;

        if (priv->data.slaves == 2) {
                priv->data.slave_data[1].slave_reg_ofs = CPSW_SLAVE1_OFFSET;
                priv->data.slave_data[1].sliver_reg_ofs = CPSW_SLIVER1_OFFSET;
        }

        subnode = fdtdec_lookup_phandle(fdt, node, "syscon");
        priv->data.mac_id = fdt_translate_address((void *)fdt, subnode, &gmii);
        priv->data.mac_id += AM335X_GMII_SEL_OFFSET;
        priv->data.mac_id += active_slave * 8;

        /* try reading mac address from efuse */
        mac_lo = readl(priv->data.mac_id);
        mac_hi = readl(priv->data.mac_id + 4);
        pdata->enetaddr[0] = mac_hi & 0xFF;
        pdata->enetaddr[1] = (mac_hi & 0xFF00) >> 8;
        pdata->enetaddr[2] = (mac_hi & 0xFF0000) >> 16;
        pdata->enetaddr[3] = (mac_hi & 0xFF000000) >> 24;
        pdata->enetaddr[4] = mac_lo & 0xFF;
        pdata->enetaddr[5] = (mac_lo & 0xFF00) >> 8;

        pdata->phy_interface = priv->data.slave_data[active_slave].phy_if;
        if (pdata->phy_interface == -1) {
                debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
                return -EINVAL;
        }

        switch (pdata->phy_interface) {
        case PHY_INTERFACE_MODE_MII:
                writel(MII_MODE_ENABLE, priv->data.gmii_sel);
                break;
        case PHY_INTERFACE_MODE_RMII:
                writel(RMII_MODE_ENABLE, priv->data.gmii_sel);
                break;
        case PHY_INTERFACE_MODE_RGMII:
        case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII_RXID:
        case PHY_INTERFACE_MODE_RGMII_TXID:
                writel(RGMII_MODE_ENABLE, priv->data.gmii_sel);
                break;
        }

        return 0;
}

static const struct udevice_id cpsw_eth_ids[] = {
        { .compatible = "ti,cpsw" },
        { .compatible = "ti,am335x-cpsw" },
        { }
};

U_BOOT_DRIVER(eth_cpsw) = {
        .name = "eth_cpsw",
        .id = UCLASS_ETH,
        .of_match = cpsw_eth_ids,
        .ofdata_to_platdata = cpsw_eth_ofdata_to_platdata,
        .probe = cpsw_eth_probe,
        .ops = &cpsw_eth_ops,
        .priv_auto_alloc_size = sizeof(struct cpsw_priv),
        .platdata_auto_alloc_size = sizeof(struct eth_pdata),
        .flags = DM_FLAG_ALLOC_PRIV_DMA,
};
#endif /* CONFIG_DM_ETH */