#ifndef _ACENIC_H_
#define _ACENIC_H_
/*
 * Generate a TX index update each time the TX ring is closed.
 * Normally this is not useful, as it results in more DMA (and more
 * IRQs without TX_COAL_INTS_ONLY).
 */
#define USE_TX_COAL_NOW 0
/*
 * Addressing:
 *
 * The Tigon uses 64-bit host addresses, regardless of their actual
 * length, and it expects them in big-endian format. On 32-bit systems
 * the upper 32 bits of the address are simply ignored (zero); on
 * little-endian 64-bit systems (Alpha) this looks odd, with the two
 * halves of the address word swapped.
 *
 * The addresses are split into two 32-bit words on all architectures,
 * as some of them live in PCI shared memory and must be accessed with
 * readl/writel.
 *
 * The addressing code is derived from Pete Wyckoff's work, but
 * modified to deal properly with readl/writel usage.
 */
struct ace_regs {
	u32 pad0[16];		/* PCI control registers */
	u32 HostCtrl;		/* 0x40 */
	u32 LocalCtrl;
	u32 pad1[2];
	u32 MiscCfg;		/* 0x50 */
	u32 pad2[2];
	u32 PciState;
	u32 pad3[2];		/* 0x60 */
	u32 WinBase;
	u32 WinData;
	u32 pad4[12];		/* 0x70 */
	u32 DmaWriteState;	/* 0xa0 */
	u32 pad5[3];
	u32 DmaReadState;	/* 0xb0 */
	u32 pad6[26];
	u32 AssistState;
	u32 pad7[8];		/* 0x120 */
	u32 CpuCtrl;		/* 0x140 */
	u32 Pc;
	u32 pad8[3];
	u32 SramAddr;		/* 0x154 */
	u32 SramData;
	u32 pad9[49];
	u32 MacRxState;		/* 0x220 */
	u32 pad10[7];
	u32 CpuBCtrl;		/* 0x240 */
	u32 PcB;
	u32 pad11[3];
	u32 SramBAddr;		/* 0x254 */
	u32 SramBData;
	u32 pad12[105];
	u32 pad13[32];		/* 0x400 */
	u32 Stats[32];
	u32 Mb0Hi;		/* 0x500 */
	u32 Mb0Lo;
	u32 Mb1Hi;
	u32 CmdPrd;
	u32 Mb2Hi;
	u32 TxPrd;
	u32 Mb3Hi;
	u32 RxStdPrd;
	u32 Mb4Hi;
	u32 RxJumboPrd;
	u32 Mb5Hi;
	u32 RxMiniPrd;
	u32 Mb6Hi;
	u32 Mb6Lo;
	u32 Mb7Hi;
	u32 Mb7Lo;
	u32 Mb8Hi;
	u32 Mb8Lo;
	u32 Mb9Hi;
	u32 Mb9Lo;
	u32 MbAHi;
	u32 MbALo;
	u32 MbBHi;
	u32 MbBLo;
	u32 MbCHi;
	u32 MbCLo;
	u32 MbDHi;
	u32 MbDLo;
	u32 MbEHi;
	u32 MbELo;
	u32 MbFHi;
	u32 MbFLo;
	u32 pad14[32];
	u32 MacAddrHi;		/* 0x600 */
	u32 MacAddrLo;
	u32 InfoPtrHi;
	u32 InfoPtrLo;
	u32 MultiCastHi;	/* 0x610 */
	u32 MultiCastLo;
	u32 ModeStat;
	u32 DmaReadCfg;
	u32 DmaWriteCfg;	/* 0x620 */
	u32 TxBufRat;
	u32 EvtCsm;
	u32 CmdCsm;
	u32 TuneRxCoalTicks;	/* 0x630 */
	u32 TuneTxCoalTicks;
	u32 TuneStatTicks;
	u32 TuneMaxTxDesc;
	u32 TuneMaxRxDesc;	/* 0x640 */
	u32 TuneTrace;
	u32 TuneLink;
	u32 TuneFastLink;
	u32 TracePtr;		/* 0x650 */
	u32 TraceStrt;
	u32 TraceLen;
	u32 IfIdx;
	u32 IfMtu;		/* 0x660 */
	u32 MaskInt;
	u32 GigLnkState;
	u32 FastLnkState;
	u32 pad16[4];		/* 0x670 */
	u32 RxRetCsm;		/* 0x680 */
	u32 pad17[31];
	u32 CmdRng[64];		/* 0x700 */
	u32 Window[0x200];
};

typedef struct {
	u32 addrhi;
	u32 addrlo;
} aceaddr;
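/*
 * Illustrative sketch (not part of the driver): an aceaddr that lives in
 * PCI shared memory has to be filled in with writel(), one 32-bit half at
 * a time; on 32-bit hosts the upper half is simply zero. The helper name
 * is made up for this example.
 */
#if 0
static inline void example_set_shared_aceaddr(aceaddr __iomem *aa,
					      dma_addr_t addr)
{
	u64 baddr = (u64) addr;

	writel(baddr >> 32, &aa->addrhi);	/* upper 32 bits (zero on 32-bit hosts) */
	writel(baddr & 0xffffffff, &aa->addrlo);	/* lower 32 bits */
}
#endif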
#define ACE_WINDOW_SIZE 0x800
#define ACE_JUMBO_MTU 9000
#define ACE_STD_MTU 1500
#define ACE_TRACE_SIZE 0x8000
/*
 * Host control register bits.
 */
#define IN_INT 0x01
#define CLR_INT 0x02
#define HW_RESET 0x08
#define BYTE_SWAP 0x10
#define WORD_SWAP 0x20
#define MASK_INTS 0x40
/*
 * Local control register bits.
 */
#define EEPROM_DATA_IN 0x800000
#define EEPROM_DATA_OUT 0x400000
#define EEPROM_WRITE_ENABLE 0x200000
#define EEPROM_CLK_OUT 0x100000
#define EEPROM_BASE 0xa0000000
#define EEPROM_WRITE_SELECT 0xa0
#define EEPROM_READ_SELECT 0xa1
#define SRAM_BANK_512K 0x200
/*
 * udelay() values for when clocking the eeprom
 */
#define ACE_SHORT_DELAY 2
#define ACE_LONG_DELAY 4
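/*
 * Illustrative sketch (not part of the driver): the EEPROM is bit-banged
 * through LocalCtrl, so one clock pulse looks roughly like this. The
 * helper name is made up; the real EEPROM routines live in acenic.c.
 */
#if 0
static void example_eeprom_clock_pulse(struct ace_regs __iomem *regs)
{
	u32 local = readl(&regs->LocalCtrl);

	writel(local | EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);	/* flush the posted write */
	udelay(ACE_SHORT_DELAY);

	writel(local & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	udelay(ACE_SHORT_DELAY);
}
#endif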
/*
 * Misc Config bits
 */
#define SYNC_SRAM_TIMING 0x100000
/*
 * CPU state bits.
 */
#define CPU_RESET 0x01
#define CPU_TRACE 0x02
#define CPU_PROM_FAILED 0x10
#define CPU_HALT 0x00010000
#define CPU_HALTED 0xffff0000
/*
 * PCI State bits.
 */
#define DMA_READ_MAX_4 0x04
#define DMA_READ_MAX_16 0x08
#define DMA_READ_MAX_32 0x0c
#define DMA_READ_MAX_64 0x10
#define DMA_READ_MAX_128 0x14
#define DMA_READ_MAX_256 0x18
#define DMA_READ_MAX_1K 0x1c
#define DMA_WRITE_MAX_4 0x20
#define DMA_WRITE_MAX_16 0x40
#define DMA_WRITE_MAX_32 0x60
#define DMA_WRITE_MAX_64 0x80
#define DMA_WRITE_MAX_128 0xa0
#define DMA_WRITE_MAX_256 0xc0
#define DMA_WRITE_MAX_1K 0xe0
#define DMA_READ_WRITE_MASK 0xfc
#define MEM_READ_MULTIPLE 0x00020000
#define PCI_66MHZ 0x00080000
#define PCI_32BIT 0x00100000
#define DMA_WRITE_ALL_ALIGN 0x00800000
#define READ_CMD_MEM 0x06000000
#define WRITE_CMD_MEM 0x70000000
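/*
 * Illustrative sketch (not part of the driver): the DMA read/write burst
 * limits are multi-bit fields inside PciState, so the old field has to be
 * masked out before a new limit is or'ed in. The limits chosen here are
 * just an example; the helper name is made up.
 */
#if 0
static void example_set_dma_burst(struct ace_regs __iomem *regs)
{
	u32 pci_state = readl(&regs->PciState);

	pci_state &= ~DMA_READ_WRITE_MASK;
	pci_state |= DMA_READ_MAX_64 | DMA_WRITE_MAX_128;
	writel(pci_state, &regs->PciState);
}
#endif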
/*
 * Mode status
 */
#define ACE_BYTE_SWAP_BD 0x02
#define ACE_WORD_SWAP_BD 0x04 /* not actually used */
#define ACE_WARN 0x08
#define ACE_BYTE_SWAP_DMA 0x10
#define ACE_NO_JUMBO_FRAG 0x200
#define ACE_FATAL 0x40000000
/*
 * DMA config
 */
#define DMA_THRESH_1W 0x10
#define DMA_THRESH_2W 0x20
#define DMA_THRESH_4W 0x40
#define DMA_THRESH_8W 0x80
#define DMA_THRESH_16W 0x100
#define DMA_THRESH_32W 0x0 /* not described in doc, but exists. */
/*
 * Tuning parameters
 */
#define TICKS_PER_SEC 1000000
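/*
 * Illustrative sketch (not part of the driver): with TICKS_PER_SEC at
 * 1000000, one coalescing tick corresponds to one microsecond. The
 * values below are examples only; the driver's real defaults are set in
 * acenic.c, and the helper name is made up.
 */
#if 0
static void example_set_coalescing(struct ace_regs __iomem *regs)
{
	writel(120, &regs->TuneRxCoalTicks);	/* 120 ticks = 120us */
	writel(400, &regs->TuneTxCoalTicks);	/* 400 ticks = 400us */
	writel(128, &regs->TuneMaxTxDesc);	/* or after 128 TX descriptors */
	writel(32, &regs->TuneMaxRxDesc);
}
#endif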
/*
 * Link bits
 */
#define LNK_PREF 0x00008000
#define LNK_10MB 0x00010000
#define LNK_100MB 0x00020000
#define LNK_1000MB 0x00040000
#define LNK_FULL_DUPLEX 0x00080000
#define LNK_HALF_DUPLEX 0x00100000
#define LNK_TX_FLOW_CTL_Y 0x00200000
#define LNK_NEG_ADVANCED 0x00400000
#define LNK_RX_FLOW_CTL_Y 0x00800000
#define LNK_NIC 0x01000000
#define LNK_JAM 0x02000000
#define LNK_JUMBO 0x04000000
#define LNK_ALTEON 0x08000000
#define LNK_NEG_FCTL 0x10000000
#define LNK_NEGOTIATE 0x20000000
#define LNK_ENABLE 0x40000000
#define LNK_UP 0x80000000
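/*
 * Illustrative sketch (not part of the driver): link parameters are built
 * by or'ing together the speeds and features to advertise and writing the
 * result to TuneLink (a separate TuneFastLink register exists as well).
 * The combination below and the helper name are only an example.
 */
#if 0
static void example_start_negotiation(struct ace_regs __iomem *regs)
{
	u32 link = LNK_ENABLE | LNK_NEGOTIATE | LNK_FULL_DUPLEX |
		   LNK_1000MB | LNK_100MB | LNK_10MB | LNK_RX_FLOW_CTL_Y;

	writel(link, &regs->TuneLink);
}
#endif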
/*
 * Event definitions
 */
#define EVT_RING_ENTRIES 256
#define EVT_RING_SIZE (EVT_RING_ENTRIES * sizeof(struct event))
struct event {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u32 idx:12;
	u32 code:12;
	u32 evt:8;
#else
	u32 evt:8;
	u32 code:12;
	u32 idx:12;
#endif
	u32 pad;
};
/*
 * Events
 */
#define E_FW_RUNNING 0x01
#define E_STATS_UPDATED 0x04
#define E_STATS_UPDATE 0x04
#define E_LNK_STATE 0x06
#define E_C_LINK_UP 0x01
#define E_C_LINK_DOWN 0x02
#define E_C_LINK_10_100 0x03
#define E_ERROR 0x07
#define E_C_ERR_INVAL_CMD 0x01
#define E_C_ERR_UNIMP_CMD 0x02
#define E_C_ERR_BAD_CFG 0x03
#define E_MCAST_LIST 0x08
#define E_C_MCAST_ADDR_ADD 0x01
#define E_C_MCAST_ADDR_DEL 0x02
#define E_RESET_JUMBO_RNG 0x09
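/*
 * Illustrative sketch (not part of the driver): the firmware posts events
 * into the event ring and advances the producer index; the host walks the
 * ring up to that index and acknowledges the entries through EvtCsm. The
 * helper name is made up; the real handler lives in acenic.c.
 */
#if 0
static u32 example_handle_events(struct event *evt_ring,
				 struct ace_regs __iomem *regs,
				 u32 evtcsm, u32 evtprd)
{
	while (evtcsm != evtprd) {
		switch (evt_ring[evtcsm].evt) {
		case E_FW_RUNNING:
			/* firmware is up and running */
			break;
		case E_LNK_STATE:
			/* link change, details in evt_ring[evtcsm].code */
			break;
		case E_ERROR:
			/* firmware reported an error (invalid command etc.) */
			break;
		}
		evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
	}
	writel(evtcsm, &regs->EvtCsm);	/* acknowledge the consumed events */
	return evtcsm;
}
#endif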
/*
 * Commands
 */
#define CMD_RING_ENTRIES 64
struct cmd {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u32 idx:12;
	u32 code:12;
	u32 evt:8;
#else
	u32 evt:8;
	u32 code:12;
	u32 idx:12;
#endif
};
#define C_HOST_STATE 0x01
#define C_C_STACK_UP 0x01
#define C_C_STACK_DOWN 0x02
#define C_FDR_FILTERING 0x02
#define C_C_FDR_FILT_ENABLE 0x01
#define C_C_FDR_FILT_DISABLE 0x02
#define C_SET_RX_PRD_IDX 0x03
#define C_UPDATE_STATS 0x04
#define C_RESET_JUMBO_RNG 0x05
#define C_ADD_MULTICAST_ADDR 0x08
#define C_DEL_MULTICAST_ADDR 0x09
#define C_SET_PROMISC_MODE 0x0a
#define C_C_PROMISC_ENABLE 0x01
#define C_C_PROMISC_DISABLE 0x02
#define C_LNK_NEGOTIATION 0x0b
#define C_C_NEGOTIATE_BOTH 0x00
#define C_C_NEGOTIATE_GIG 0x01
#define C_C_NEGOTIATE_10_100 0x02
#define C_SET_MAC_ADDR 0x0c
#define C_CLEAR_PROFILE 0x0d
#define C_SET_MULTICAST_MODE 0x0e
#define C_C_MCAST_ENABLE 0x01
#define C_C_MCAST_DISABLE 0x02
#define C_CLEAR_STATS 0x0f
#define C_SET_RX_JUMBO_PRD_IDX 0x10
#define C_REFRESH_STATS 0x11
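/*
 * Illustrative sketch (not part of the driver): a command is issued by
 * writing the 32-bit struct cmd image into the CmdRng area and advancing
 * CmdPrd. acenic.c contains the real version of this helper; the names
 * here are made up.
 */
#if 0
static inline void example_issue_cmd(struct ace_regs __iomem *regs,
				     struct cmd *cmd)
{
	u32 idx = readl(&regs->CmdPrd);

	writel(*(u32 *) cmd, &regs->CmdRng[idx]);
	writel((idx + 1) % CMD_RING_ENTRIES, &regs->CmdPrd);
}

/* e.g. ask the firmware to refresh the statistics block: */
static void example_request_stats(struct ace_regs __iomem *regs)
{
	struct cmd cmd;

	cmd.evt = 0;
	cmd.code = C_UPDATE_STATS;
	cmd.idx = 0;
	example_issue_cmd(regs, &cmd);
}
#endif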
/*
 * Descriptor flags
 */
#define BD_FLG_TCP_UDP_SUM 0x01
#define BD_FLG_IP_SUM 0x02
#define BD_FLG_END 0x04
#define BD_FLG_MORE 0x08
#define BD_FLG_JUMBO 0x10
#define BD_FLG_UCAST 0x20
#define BD_FLG_MCAST 0x40
#define BD_FLG_BCAST 0x60
#define BD_FLG_TYP_MASK 0x60
#define BD_FLG_IP_FRAG 0x80
#define BD_FLG_IP_FRAG_END 0x100
#define BD_FLG_VLAN_TAG 0x200
#define BD_FLG_FRAME_ERROR 0x400
#define BD_FLG_COAL_NOW 0x800
#define BD_FLG_MINI 0x1000
/*
 * Ring Control block flags
 */
#define RCB_FLG_TCP_UDP_SUM 0x01
#define RCB_FLG_IP_SUM 0x02
#define RCB_FLG_NO_PSEUDO_HDR 0x08
#define RCB_FLG_VLAN_ASSIST 0x10
#define RCB_FLG_COAL_INT_ONLY 0x20
#define RCB_FLG_TX_HOST_RING 0x40
#define RCB_FLG_IEEE_SNAP_SUM 0x80
#define RCB_FLG_EXT_RX_BD 0x100
#define RCB_FLG_RNG_DISABLE 0x200
/*
 * TX ring - the maximum number of TX ring entries on the Tigon I is 128.
 */
#define MAX_TX_RING_ENTRIES 256
#define TIGON_I_TX_RING_ENTRIES 128
#define TX_RING_SIZE (MAX_TX_RING_ENTRIES * sizeof(struct tx_desc))
#define TX_RING_BASE 0x3800
struct tx_desc {
	aceaddr addr;
	u32 flagsize;
#if 0
/*
 * This is in PCI shared mem and must be accessed with readl/writel;
 * the real layout is:
 */
#if __LITTLE_ENDIAN
	u16 flags;
	u16 size;
	u16 vlan;
	u16 reserved;
#else
	u16 size;
	u16 flags;
	u16 reserved;
	u16 vlan;
#endif
#endif
	u32 vlanres;
};
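/*
 * Illustrative sketch (not part of the driver): the packet length goes
 * into the upper half of flagsize and the BD_FLG_* bits into the lower
 * half (matching the layout documented above); on the Tigon I the TX ring
 * sits in shared memory, hence the writel() accesses. The helper name is
 * made up for this example.
 */
#if 0
static void example_load_tx_desc(struct tx_desc __iomem *desc,
				 dma_addr_t addr, u32 len)
{
	u32 flagsize = (len << 16) | BD_FLG_END;	/* single-fragment frame */

	writel((u64) addr >> 32, &desc->addr.addrhi);
	writel((u64) addr & 0xffffffff, &desc->addr.addrlo);
	writel(flagsize, &desc->flagsize);
}
#endif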
#define RX_STD_RING_ENTRIES 512
#define RX_STD_RING_SIZE (RX_STD_RING_ENTRIES * sizeof(struct rx_desc))
#define RX_JUMBO_RING_ENTRIES 256
#define RX_JUMBO_RING_SIZE (RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc))
#define RX_MINI_RING_ENTRIES 1024
#define RX_MINI_RING_SIZE (RX_MINI_RING_ENTRIES * sizeof(struct rx_desc))
#define RX_RETURN_RING_ENTRIES 2048
#define RX_RETURN_RING_SIZE (RX_RETURN_RING_ENTRIES * \
			     sizeof(struct rx_desc))
struct rx_desc {
	aceaddr addr;
#ifdef __LITTLE_ENDIAN
	u16 size;
	u16 idx;
#else
	u16 idx;
	u16 size;
#endif
#ifdef __LITTLE_ENDIAN
	u16 flags;
	u16 type;
#else
	u16 type;
	u16 flags;
#endif
#ifdef __LITTLE_ENDIAN
	u16 tcp_udp_csum;
	u16 ip_csum;
#else
	u16 ip_csum;
	u16 tcp_udp_csum;
#endif
#ifdef __LITTLE_ENDIAN
	u16 vlan;
	u16 err_flags;
#else
	u16 err_flags;
	u16 vlan;
#endif
	u32 reserved;
	u32 opague;
};
/*
 * This struct is shared with the NIC firmware.
 */
struct ring_ctrl {
	aceaddr rngptr;
#ifdef __LITTLE_ENDIAN
	u16 flags;
	u16 max_len;
#else
	u16 max_len;
	u16 flags;
#endif
	u32 pad;
};
struct ace_mac_stats {
	u32 excess_colls;
	u32 coll_1;
	u32 coll_2;
	u32 coll_3;
	u32 coll_4;
	u32 coll_5;
	u32 coll_6;
	u32 coll_7;
	u32 coll_8;
	u32 coll_9;
	u32 coll_10;
	u32 coll_11;
	u32 coll_12;
	u32 coll_13;
	u32 coll_14;
	u32 coll_15;
	u32 late_coll;
	u32 defers;
	u32 crc_err;
	u32 underrun;
	u32 crs_err;
	u32 pad[3];
	u32 drop_ula;
	u32 drop_mc;
	u32 drop_fc;
	u32 drop_space;
	u32 coll;
	u32 kept_bc;
	u32 kept_mc;
	u32 kept_uc;
};
struct ace_info {
	union {
		u32 stats[256];
	} s;
	struct ring_ctrl evt_ctrl;
	struct ring_ctrl cmd_ctrl;
	struct ring_ctrl tx_ctrl;
	struct ring_ctrl rx_std_ctrl;
	struct ring_ctrl rx_jumbo_ctrl;
	struct ring_ctrl rx_mini_ctrl;
	struct ring_ctrl rx_return_ctrl;
	aceaddr evt_prd_ptr;
	aceaddr rx_ret_prd_ptr;
	aceaddr tx_csm_ptr;
	aceaddr stats2_ptr;
};
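/*
 * Illustrative sketch (not part of the driver): ace_info lives in host
 * memory; the host fills in the ring control blocks and then hands the
 * block to the NIC through InfoPtrHi/InfoPtrLo. The local names, helper
 * name and the buffer size are made up for this example.
 */
#if 0
static void example_publish_info_block(struct ace_regs __iomem *regs,
				       struct ace_info *info,
				       dma_addr_t info_dma,
				       dma_addr_t rx_std_dma)
{
	struct ring_ctrl *rcb = &info->rx_std_ctrl;

	/* describe the standard RX ring to the firmware */
	rcb->rngptr.addrhi = (u64) rx_std_dma >> 32;
	rcb->rngptr.addrlo = rx_std_dma & 0xffffffff;
	rcb->max_len = ACE_STD_MTU + 14 + 4;	/* example: MTU + header + FCS */
	rcb->flags = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR;

	/* tell the NIC where the shared info block is */
	writel((u64) info_dma >> 32, &regs->InfoPtrHi);
	writel(info_dma & 0xffffffff, &regs->InfoPtrLo);
}
#endif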
struct ring_info {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)
};
/*
 * Funny... As soon as we add maplen on alpha, it starts to run much
 * slower. Hmm... is it because the struct doesn't fit in one cache line?
 * So, split tx_ring_info.
 */
struct tx_ring_info {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	DECLARE_PCI_UNMAP_LEN(maplen)
};
/*
 * struct ace_skb holds the rings of skbs. This is an awful lot of
 * pointers, but I don't see any smarter way to do this efficiently ;-(
 */
struct ace_skb
{
	struct tx_ring_info tx_skbuff[MAX_TX_RING_ENTRIES];
	struct ring_info rx_std_skbuff[RX_STD_RING_ENTRIES];
	struct ring_info rx_mini_skbuff[RX_MINI_RING_ENTRIES];
	struct ring_info rx_jumbo_skbuff[RX_JUMBO_RING_ENTRIES];
};
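/*
 * Illustrative sketch (not part of the driver): refilling one slot of the
 * standard RX ring after an skb has been allocated and DMA-mapped. It
 * uses struct ace_private and set_aceaddr(), both declared further down
 * in this header, and a made-up buffer size and helper name; the real
 * refill loops are ace_load_std_rx_ring() and friends.
 */
#if 0
static void example_refill_std_slot(struct ace_private *ap,
				    struct sk_buff *skb,
				    dma_addr_t mapping, int idx)
{
	struct ring_info *ri = &ap->skb->rx_std_skbuff[idx];
	struct rx_desc *rd = &ap->rx_std_ring[idx];

	ri->skb = skb;
	pci_unmap_addr_set(ri, mapping, mapping);

	set_aceaddr(&rd->addr, mapping);
	rd->size = ACE_STD_MTU + 14 + 4;	/* example buffer size */
	rd->idx = idx;

	/* tell the NIC (Tigon I uses the C_SET_RX_PRD_IDX command instead) */
	writel((idx + 1) % RX_STD_RING_ENTRIES, &ap->regs->RxStdPrd);
}
#endif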
/*
 * Struct private for the AceNIC.
 *
 * Elements are grouped so that variables used by the tx handling go
 * together and end up in the same cache lines, in order to avoid cache
 * line contention between the rx and tx handling on SMP.
 *
 * Frequently accessed variables are put at the beginning of the
 * struct to help the compiler generate better/shorter code.
 */
struct ace_private
{
	struct ace_info *info;
	struct ace_regs __iomem *regs;	/* register base */
	struct ace_skb *skb;
	dma_addr_t info_dma;		/* 32/64 bit */
	int version, link;
	int promisc, mcast_all;
	/*
	 * TX elements
	 */
	struct tx_desc *tx_ring;
	u32 tx_prd;
	volatile u32 tx_ret_csm;
	int tx_ring_entries;
	/*
	 * RX elements
	 */
	unsigned long std_refill_busy
		__attribute__ ((aligned (SMP_CACHE_BYTES)));
	unsigned long mini_refill_busy, jumbo_refill_busy;
	atomic_t cur_rx_bufs;
	atomic_t cur_mini_bufs;
	atomic_t cur_jumbo_bufs;
	u32 rx_std_skbprd, rx_mini_skbprd, rx_jumbo_skbprd;
	u32 cur_rx;
	struct rx_desc *rx_std_ring;
	struct rx_desc *rx_jumbo_ring;
	struct rx_desc *rx_mini_ring;
	struct rx_desc *rx_return_ring;
#if ACENIC_DO_VLAN
	struct vlan_group *vlgrp;
#endif
	int tasklet_pending, jumbo;
	struct tasklet_struct ace_tasklet;
	struct event *evt_ring;
	volatile u32 *evt_prd, *rx_ret_prd, *tx_csm;
	dma_addr_t tx_ring_dma;		/* 32/64 bit */
	dma_addr_t rx_ring_base_dma;
	dma_addr_t evt_ring_dma;
	dma_addr_t evt_prd_dma, rx_ret_prd_dma, tx_csm_dma;
	unsigned char *trace_buf;
	struct pci_dev *pdev;
	struct net_device *next;
	volatile int fw_running;
	int board_idx;
	u16 pci_command;
	u8 pci_latency;
	const char *name;
#ifdef INDEX_DEBUG
	spinlock_t debug_lock
		__attribute__ ((aligned (SMP_CACHE_BYTES)));
	u32 last_tx, last_std_rx, last_mini_rx;
#endif
	struct net_device_stats stats;
	int pci_using_dac;
};
#define TX_RESERVED MAX_SKB_FRAGS
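/*
 * tx_space() returns the number of free slots between the consumer (csm)
 * and producer (prd) indices, modulo the ring size; tx_ring_full() keeps
 * TX_RESERVED slots spare so that a maximally fragmented skb still fits.
 */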
static inline int tx_space (struct ace_private *ap, u32 csm, u32 prd)
{
	return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1);
}
#define tx_free(ap) tx_space((ap), (ap)->tx_ret_csm, (ap)->tx_prd)
#define tx_ring_full(ap, csm, prd) (tx_space(ap, csm, prd) <= TX_RESERVED)
static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr)
{
	u64 baddr = (u64) addr;

	aa->addrlo = baddr & 0xffffffff;
	aa->addrhi = baddr >> 32;
	wmb();
}
static inline void ace_set_txprd(struct ace_regs __iomem *regs,
				 struct ace_private *ap, u32 value)
{
#ifdef INDEX_DEBUG
	unsigned long flags;

	spin_lock_irqsave(&ap->debug_lock, flags);
	writel(value, &regs->TxPrd);
	if (value == ap->last_tx)
		printk(KERN_ERR "AceNIC RACE ALERT! writing identical value "
		       "to tx producer (%i)\n", value);
	ap->last_tx = value;
	spin_unlock_irqrestore(&ap->debug_lock, flags);
#else
	writel(value, &regs->TxPrd);
#endif
	wmb();
}
static inline void ace_mask_irq(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	if (ACE_IS_TIGON_I(ap))
		writel(1, &regs->MaskInt);
	else
		writel(readl(&regs->HostCtrl) | MASK_INTS, &regs->HostCtrl);

	ace_sync_irq(dev->irq);
}

static inline void ace_unmask_irq(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	if (ACE_IS_TIGON_I(ap))
		writel(0, &regs->MaskInt);
	else
		writel(readl(&regs->HostCtrl) & ~MASK_INTS, &regs->HostCtrl);
}
/*
 * Prototypes
 */
static int ace_init(struct net_device *dev);
static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs);
static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs);
static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs);
static irqreturn_t ace_interrupt(int irq, void *dev_id);
static int ace_load_firmware(struct net_device *dev);
static int ace_open(struct net_device *dev);
static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int ace_close(struct net_device *dev);
static void ace_tasklet(unsigned long dev);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
static int ace_set_mac_addr(struct net_device *dev, void *p);
static void ace_set_rxtx_parms(struct net_device *dev, int jumbo);
static int ace_allocate_descriptors(struct net_device *dev);
static void ace_free_descriptors(struct net_device *dev);
static void ace_init_cleanup(struct net_device *dev);
static struct net_device_stats *ace_get_stats(struct net_device *dev);
static int read_eeprom_byte(struct net_device *dev, unsigned long offset);
#if ACENIC_DO_VLAN
static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp);
static void ace_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
#endif
#endif /* _ACENIC_H_ */