#ifndef __MV643XX_ETH_H__
#define __MV643XX_ETH_H__

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>

#include <linux/mv643xx.h>

/* Checksum offload for Tx works for most packets, but
 * fails if the previous packet sent did not use hw csum
 */
#define MV643XX_CHECKSUM_OFFLOAD_TX
#define MV643XX_NAPI
#define MV643XX_TX_FAST_REFILL
#undef MV643XX_COAL

/*
 * Number of RX / TX descriptors on RX / TX rings.
 * Note that allocating RX descriptors means allocating the RX ring
 * AND a preallocated RX buffer (skb) for each descriptor.
 * TX descriptor allocation only sets up the TX descriptor ring,
 * with no preallocated TX buffers (skb's are allocated by higher layers).
 */

/* Default TX ring size is 1000 descriptors */
#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000

/* Default RX ring size is 400 descriptors */
#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400

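/*
 * Worked sizing example (illustrative note, not from the original source):
 * with the defaults above and the 16-byte descriptors defined later in this
 * header, the RX ring needs 400 * 16 = 6400 bytes of descriptor memory plus
 * one ETH_RX_SKB_SIZE skb per descriptor, while the TX ring needs only
 * 1000 * 16 = 16000 bytes of descriptor memory and no preallocated buffers.
 */
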
#define MV643XX_TX_COAL 100
#ifdef MV643XX_COAL
#define MV643XX_RX_COAL 100
#endif

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif

/*
 * The MV643XX HW requires 8-byte alignment.  However, when I/O
 * is non-cache-coherent, we need to ensure that the I/O buffers
 * we use don't share cache lines with other data.
 */
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_NOT_COHERENT_CACHE)
#define ETH_DMA_ALIGN	L1_CACHE_BYTES
#else
#define ETH_DMA_ALIGN	8
#endif
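
/*
 * Illustrative sketch only (not code from this driver): one way a freshly
 * allocated rx skb could be pushed up to an ETH_DMA_ALIGN boundary before
 * its data buffer is handed to the hardware.
 *
 *	unsigned long off = (unsigned long)skb->data & (ETH_DMA_ALIGN - 1);
 *
 *	if (off)
 *		skb_reserve(skb, ETH_DMA_ALIGN - off);
 */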

#define ETH_VLAN_HLEN		4
#define ETH_FCS_LEN		4
#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */
#define ETH_WRAPPER_LEN		(ETH_HW_IP_ALIGN + ETH_HLEN + \
					ETH_VLAN_HLEN + ETH_FCS_LEN)
#define ETH_RX_SKB_SIZE		(dev->mtu + ETH_WRAPPER_LEN + ETH_DMA_ALIGN)
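
/*
 * Example (illustrative arithmetic): for a standard 1500-byte MTU,
 * ETH_WRAPPER_LEN is 2 + 14 + 4 + 4 = 24 bytes, so ETH_RX_SKB_SIZE is
 * 1500 + 24 + 8 = 1532 bytes when ETH_DMA_ALIGN is 8, and larger when
 * ETH_DMA_ALIGN is L1_CACHE_BYTES on non-cache-coherent systems.
 */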

#define ETH_RX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for receive */
#define ETH_TX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for transmit */

#define ETH_INT_CAUSE_RX_DONE	(ETH_RX_QUEUES_ENABLED << 2)
#define ETH_INT_CAUSE_RX_ERROR	(ETH_RX_QUEUES_ENABLED << 9)
#define ETH_INT_CAUSE_RX	(ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
#define ETH_INT_CAUSE_EXT	0x00000002
#define ETH_INT_UNMASK_ALL	(ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)

#define ETH_INT_CAUSE_TX_DONE	(ETH_TX_QUEUES_ENABLED << 0)
#define ETH_INT_CAUSE_TX_ERROR	(ETH_TX_QUEUES_ENABLED << 8)
#define ETH_INT_CAUSE_TX	(ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
#define ETH_INT_CAUSE_PHY	0x00010000
#define ETH_INT_UNMASK_ALL_EXT	(ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY)

#define ETH_INT_MASK_ALL	0x00000000
#define ETH_INT_MASK_ALL_EXT	0x00000000

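/*
 * Illustrative decode of the interrupt cause registers using the masks
 * above.  read_int_cause(), read_int_cause_ext() and the handle_*()
 * helpers are hypothetical names used only for this sketch; they are not
 * declared by this header:
 *
 *	u32 cause = read_int_cause(port_num) & ETH_INT_UNMASK_ALL;
 *
 *	if (cause & ETH_INT_CAUSE_RX)
 *		handle_rx(dev);		// schedule receive processing
 *	if (cause & ETH_INT_CAUSE_EXT) {
 *		u32 ext = read_int_cause_ext(port_num) & ETH_INT_UNMASK_ALL_EXT;
 *
 *		if (ext & ETH_INT_CAUSE_TX)
 *			handle_tx_done(dev);
 *		if (ext & ETH_INT_CAUSE_PHY)
 *			handle_link_change(dev);
 *	}
 */
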
#define PHY_WAIT_ITERATIONS	1000	/* 1000 iterations * 10uS = 10mS max */
#define PHY_WAIT_MICRO_SECONDS	10

/* Buffer offset from buffer pointer */
#define RX_BUF_OFFSET		0x2

/* Gigabit Ethernet Unit Global Registers */

/* MIB Counters register definitions */
#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW	0x0
#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH	0x4
#define ETH_MIB_BAD_OCTETS_RECEIVED		0x8
#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR	0xc
#define ETH_MIB_GOOD_FRAMES_RECEIVED		0x10
#define ETH_MIB_BAD_FRAMES_RECEIVED		0x14
#define ETH_MIB_BROADCAST_FRAMES_RECEIVED	0x18
#define ETH_MIB_MULTICAST_FRAMES_RECEIVED	0x1c
#define ETH_MIB_FRAMES_64_OCTETS		0x20
#define ETH_MIB_FRAMES_65_TO_127_OCTETS		0x24
#define ETH_MIB_FRAMES_128_TO_255_OCTETS	0x28
#define ETH_MIB_FRAMES_256_TO_511_OCTETS	0x2c
#define ETH_MIB_FRAMES_512_TO_1023_OCTETS	0x30
#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS	0x34
#define ETH_MIB_GOOD_OCTETS_SENT_LOW		0x38
#define ETH_MIB_GOOD_OCTETS_SENT_HIGH		0x3c
#define ETH_MIB_GOOD_FRAMES_SENT		0x40
#define ETH_MIB_EXCESSIVE_COLLISION		0x44
#define ETH_MIB_MULTICAST_FRAMES_SENT		0x48
#define ETH_MIB_BROADCAST_FRAMES_SENT		0x4c
#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED	0x50
#define ETH_MIB_FC_SENT				0x54
#define ETH_MIB_GOOD_FC_RECEIVED		0x58
#define ETH_MIB_BAD_FC_RECEIVED			0x5c
#define ETH_MIB_UNDERSIZE_RECEIVED		0x60
#define ETH_MIB_FRAGMENTS_RECEIVED		0x64
#define ETH_MIB_OVERSIZE_RECEIVED		0x68
#define ETH_MIB_JABBER_RECEIVED			0x6c
#define ETH_MIB_MAC_RECEIVE_ERROR		0x70
#define ETH_MIB_BAD_CRC_EVENT			0x74
#define ETH_MIB_COLLISION			0x78
#define ETH_MIB_LATE_COLLISION			0x7c

/* Port serial status reg (PSR) */
#define ETH_INTERFACE_PCM		0x00000001
#define ETH_LINK_IS_UP			0x00000002
#define ETH_PORT_AT_FULL_DUPLEX		0x00000004
#define ETH_RX_FLOW_CTRL_ENABLED	0x00000008
#define ETH_GMII_SPEED_1000		0x00000010
#define ETH_MII_SPEED_100		0x00000020
#define ETH_TX_IN_PROGRESS		0x00000080
#define ETH_BYPASS_ACTIVE		0x00000100
#define ETH_PORT_AT_PARTITION_STATE	0x00000200
#define ETH_PORT_TX_FIFO_EMPTY		0x00000400

/* SMI reg */
#define ETH_SMI_BUSY		0x10000000	/* 1 - SMI operation in progress */
#define ETH_SMI_READ_VALID	0x08000000	/* 1 - read data valid		 */
#define ETH_SMI_OPCODE_WRITE	0		/* opcode bit clear - write	 */
#define ETH_SMI_OPCODE_READ	0x04000000	/* opcode bit set - read	 */
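
/*
 * Illustrative polled SMI (MDIO) read built from the bits above and the
 * PHY_WAIT_* constants.  smi_read()/smi_write() and the phy_addr/phy_reg
 * field positions are assumptions made for this sketch, not definitions
 * from this header; timeout handling is omitted:
 *
 *	unsigned int i, val;
 *
 *	// wait for any previous SMI operation to finish
 *	for (i = 0; (smi_read() & ETH_SMI_BUSY) && i < PHY_WAIT_ITERATIONS; i++)
 *		udelay(PHY_WAIT_MICRO_SECONDS);
 *
 *	smi_write((phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
 *
 *	// wait for the read data to become valid, then fetch it
 *	for (i = 0; !(smi_read() & ETH_SMI_READ_VALID) && i < PHY_WAIT_ITERATIONS; i++)
 *		udelay(PHY_WAIT_MICRO_SECONDS);
 *
 *	val = smi_read() & 0xffff;
 */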

/* Interrupt Cause Register Bit Definitions */

/* SDMA command status fields macros */

/* Tx & Rx descriptors status */
#define ETH_ERROR_SUMMARY		0x00000001

/* Tx & Rx descriptors command */
#define ETH_BUFFER_OWNED_BY_DMA		0x80000000

/* Tx descriptors status */
#define ETH_LC_ERROR			0
#define ETH_UR_ERROR			0x00000002
#define ETH_RL_ERROR			0x00000004
#define ETH_LLC_SNAP_FORMAT		0x00000200

/* Rx descriptors status */
#define ETH_OVERRUN_ERROR		0x00000002
#define ETH_MAX_FRAME_LENGTH_ERROR	0x00000004
#define ETH_RESOURCE_ERROR		0x00000006
#define ETH_VLAN_TAGGED			0x00080000
#define ETH_BPDU_FRAME			0x00100000
#define ETH_UDP_FRAME_OVER_IP_V_4	0x00200000
#define ETH_OTHER_FRAME_TYPE		0x00400000
#define ETH_LAYER_2_IS_ETH_V_2		0x00800000
#define ETH_FRAME_TYPE_IP_V_4		0x01000000
#define ETH_FRAME_HEADER_OK		0x02000000
#define ETH_RX_LAST_DESC		0x04000000
#define ETH_RX_FIRST_DESC		0x08000000
#define ETH_UNKNOWN_DESTINATION_ADDR	0x10000000
#define ETH_RX_ENABLE_INTERRUPT		0x20000000
#define ETH_LAYER_4_CHECKSUM_OK		0x40000000

/* Rx descriptors byte count */
#define ETH_FRAME_FRAGMENTED		0x00000004

/* Tx descriptors command */
#define ETH_LAYER_4_CHECKSUM_FIRST_DESC	0x00000400
#define ETH_FRAME_SET_TO_VLAN		0x00008000
#define ETH_UDP_FRAME			0x00010000
#define ETH_GEN_TCP_UDP_CHECKSUM	0x00020000
#define ETH_GEN_IP_V_4_CHECKSUM		0x00040000
#define ETH_ZERO_PADDING		0x00080000
#define ETH_TX_LAST_DESC		0x00100000
#define ETH_TX_FIRST_DESC		0x00200000
#define ETH_GEN_CRC			0x00400000
#define ETH_TX_ENABLE_INTERRUPT		0x00800000
#define ETH_AUTO_MODE			0x40000000

#define ETH_TX_IHL_SHIFT		11

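/*
 * Illustrative composition of a Tx descriptor command/status word for a
 * single-fragment, hardware-checksummed TCP/IPv4 frame (the value 5 is an
 * assumed IP header length in 32-bit words, i.e. a 20-byte header; this is
 * a sketch, not the driver's transmit path):
 *
 *	desc->cmd_sts = ETH_BUFFER_OWNED_BY_DMA | ETH_TX_FIRST_DESC |
 *			ETH_TX_LAST_DESC | ETH_TX_ENABLE_INTERRUPT |
 *			ETH_GEN_CRC | ETH_GEN_IP_V_4_CHECKSUM |
 *			ETH_GEN_TCP_UDP_CHECKSUM | (5 << ETH_TX_IHL_SHIFT);
 */
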
/* typedefs */

typedef enum _eth_func_ret_status {
	ETH_OK,			/* Returned as expected.		*/
	ETH_ERROR,		/* Fundamental error.			*/
	ETH_RETRY,		/* Could not process request. Try later.*/
	ETH_END_OF_JOB,		/* Ring has nothing to process.		*/
	ETH_QUEUE_FULL,		/* Ring resource error.			*/
	ETH_QUEUE_LAST_RESOURCE	/* Ring resources about to exhaust.	*/
} ETH_FUNC_RET_STATUS;

typedef enum _eth_target {
	ETH_TARGET_DRAM,
	ETH_TARGET_DEVICE,
	ETH_TARGET_CBS,
	ETH_TARGET_PCI0,
	ETH_TARGET_PCI1
} ETH_TARGET;

/* These are for big-endian machines.  Little endian needs different
 * definitions.
 */
#if defined(__BIG_ENDIAN)
struct eth_rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct eth_tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};

#elif defined(__LITTLE_ENDIAN)
struct eth_rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct eth_tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* Unified struct for Rx and Tx operations. The user is not required */
/* to be familiar with Tx or Rx descriptor layouts.		      */
struct pkt_info {
	unsigned short byte_cnt;	/* Descriptor buffer byte count	*/
	unsigned short l4i_chk;		/* Tx CPU provided TCP Checksum	*/
	unsigned int cmd_sts;		/* Descriptor command status	*/
	dma_addr_t buf_ptr;		/* Descriptor buffer pointer	*/
	struct sk_buff *return_info;	/* User resource return information */
};

/* Ethernet port specific information */

struct mv643xx_mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

struct mv643xx_private {
	int port_num;			/* User Ethernet port number	*/

	u32 rx_sram_addr;		/* Base address of rx sram area	*/
	u32 rx_sram_size;		/* Size of rx sram area		*/
	u32 tx_sram_addr;		/* Base address of tx sram area	*/
	u32 tx_sram_size;		/* Size of tx sram area		*/

	int rx_resource_err;		/* Rx ring resource error flag	*/

	/* Tx/Rx ring management index fields. For driver use. */

	/* Next available and first returning Rx resource */
	int rx_curr_desc_q, rx_used_desc_q;

	/* Next available and first returning Tx resource */
	int tx_curr_desc_q, tx_used_desc_q;

#ifdef MV643XX_TX_FAST_REFILL
	u32 tx_clean_threshold;
#endif

	struct eth_rx_desc *p_rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct eth_tx_desc *p_tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;

	struct work_struct tx_timeout_task;

	struct net_device_stats stats;
	struct mv643xx_mib_counters mib_counters;
	spinlock_t lock;
	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case the RX ring is empty, which can happen when the
	 * system is out of resources (skb's)
	 */
	struct timer_list timeout;

	u32 rx_int_coal;
	u32 tx_int_coal;
	struct mii_if_info mii;
};

/* Port operation control routines */
static void eth_port_init(struct mv643xx_private *mp);
static void eth_port_reset(unsigned int eth_port_num);
static void eth_port_start(struct net_device *dev);

/* Port MAC address routines */
static void eth_port_uc_addr_set(unsigned int eth_port_num,
						unsigned char *p_addr);

/* PHY and MIB routines */
static void ethernet_phy_reset(unsigned int eth_port_num);

static void eth_port_write_smi_reg(unsigned int eth_port_num,
				unsigned int phy_reg, unsigned int value);

static void eth_port_read_smi_reg(unsigned int eth_port_num,
				unsigned int phy_reg, unsigned int *value);

static void eth_clear_mib_counters(unsigned int eth_port_num);

/* Port data flow control routines */
static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info);
static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info);
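
/*
 * Illustrative receive loop built on the two routines above.  Error paths,
 * locking and buffer refill are omitted; 'mp' and 'dev' stand for the
 * driver private struct and its net_device.  This is a sketch, not the
 * driver's actual poll/interrupt code.
 *
 *	struct pkt_info pkt_info;
 *
 *	while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
 *		struct sk_buff *skb = pkt_info.return_info;
 *
 *		if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY) {
 *			dev_kfree_skb_irq(skb);
 *			continue;
 *		}
 *		skb_put(skb, pkt_info.byte_cnt);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */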

#endif /* __MV643XX_ETH_H__ */