#ifndef __MV643XX_ETH_H__
#define __MV643XX_ETH_H__

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>

#include <linux/mv643xx.h>

#include <asm/dma-mapping.h>

/* Checksum offload for Tx works for most packets, but
 * fails if previous packet sent did not use hw csum
 */
#define MV643XX_CHECKSUM_OFFLOAD_TX
#define MV643XX_NAPI
#define MV643XX_TX_FAST_REFILL
#undef MV643XX_COAL

/*
 * Number of RX / TX descriptors on RX / TX rings.
 * Note that allocating RX descriptors means allocating both the RX
 * descriptor ring AND a preallocated RX buffer (skb) for each descriptor.
 * The TX path only allocates the TX descriptor ring, with no preallocated
 * TX buffers (skb's are allocated by higher layers).
 */

/* Default TX ring size is 1000 descriptors */
#define MV643XX_DEFAULT_TX_QUEUE_SIZE	1000

/* Default RX ring size is 400 descriptors */
#define MV643XX_DEFAULT_RX_QUEUE_SIZE	400

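/*
 * A minimal sketch (illustration only, not the driver's code) of how an
 * RX descriptor ring of MV643XX_DEFAULT_RX_QUEUE_SIZE entries could be
 * allocated and chained into a circle.  "mp" (struct mv643xx_private,
 * defined below) and the use of dma_alloc_coherent() with a NULL device
 * are assumptions; the real setup lives in mv643xx_eth.c.
 *
 *	int size = MV643XX_DEFAULT_RX_QUEUE_SIZE * sizeof(struct eth_rx_desc);
 *	struct eth_rx_desc *ring;
 *	int i;
 *
 *	ring = dma_alloc_coherent(NULL, size, &mp->rx_desc_dma, GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	memset(ring, 0, size);
 *	for (i = 0; i < MV643XX_DEFAULT_RX_QUEUE_SIZE; i++)
 *		ring[i].next_desc_ptr = mp->rx_desc_dma +
 *			((i + 1) % MV643XX_DEFAULT_RX_QUEUE_SIZE) *
 *			sizeof(struct eth_rx_desc);
 *	mp->p_rx_desc_area = ring;
 *	mp->rx_desc_area_size = size;
 */
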
#define MV643XX_TX_COAL 100
#ifdef MV643XX_COAL
#define MV643XX_RX_COAL 100
#endif

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif

#define ETH_VLAN_HLEN		4
#define ETH_FCS_LEN		4
#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */
#define ETH_WRAPPER_LEN		(ETH_HW_IP_ALIGN + ETH_HLEN + \
					ETH_VLAN_HLEN + ETH_FCS_LEN)
#define ETH_RX_SKB_SIZE		(dev->mtu + ETH_WRAPPER_LEN + \
					dma_get_cache_alignment())

#define ETH_RX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for receive */
#define ETH_TX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for transmit */

#define ETH_INT_CAUSE_RX_DONE	(ETH_RX_QUEUES_ENABLED << 2)
#define ETH_INT_CAUSE_RX_ERROR	(ETH_RX_QUEUES_ENABLED << 9)
#define ETH_INT_CAUSE_RX	(ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
#define ETH_INT_CAUSE_EXT	0x00000002
#define ETH_INT_UNMASK_ALL	(ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)

#define ETH_INT_CAUSE_TX_DONE	(ETH_TX_QUEUES_ENABLED << 0)
#define ETH_INT_CAUSE_TX_ERROR	(ETH_TX_QUEUES_ENABLED << 8)
#define ETH_INT_CAUSE_TX	(ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
#define ETH_INT_CAUSE_PHY	0x00010000
#define ETH_INT_CAUSE_STATE	0x00100000
#define ETH_INT_UNMASK_ALL_EXT	(ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
					ETH_INT_CAUSE_STATE)

#define ETH_INT_MASK_ALL	0x00000000
#define ETH_INT_MASK_ALL_EXT	0x00000000

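/*
 * A minimal sketch (assumption, not the driver's actual ISR) of how the
 * cause bits above are decoded.  mv_read() and the
 * MV643XX_ETH_INTERRUPT_CAUSE_REG()/MV643XX_ETH_INTERRUPT_EXTEND_CAUSE_REG()
 * accessors are assumed to come from mv643xx_eth.c / <linux/mv643xx.h>, and
 * handle_tx_done()/handle_link_change() are hypothetical placeholders.
 *
 *	u32 cause, cause_ext;
 *
 *	cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num));
 *	if (cause & ETH_INT_CAUSE_RX)
 *		netif_rx_schedule(dev, &mp->napi);
 *	if (cause & ETH_INT_CAUSE_EXT) {
 *		cause_ext = mv_read(MV643XX_ETH_INTERRUPT_EXTEND_CAUSE_REG(port_num));
 *		if (cause_ext & ETH_INT_CAUSE_TX)
 *			handle_tx_done(dev);
 *		if (cause_ext & (ETH_INT_CAUSE_PHY | ETH_INT_CAUSE_STATE))
 *			handle_link_change(dev);
 *	}
 */
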
#define PHY_WAIT_ITERATIONS	1000	/* 1000 iterations * 10uS = 10mS max */
#define PHY_WAIT_MICRO_SECONDS	10

/* Buffer offset from buffer pointer */
#define RX_BUF_OFFSET		0x2

/* Gigabit Ethernet Unit Global Registers */

/* MIB Counters register definitions */
#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW	0x0
#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH	0x4
#define ETH_MIB_BAD_OCTETS_RECEIVED		0x8
#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR	0xc
#define ETH_MIB_GOOD_FRAMES_RECEIVED		0x10
#define ETH_MIB_BAD_FRAMES_RECEIVED		0x14
#define ETH_MIB_BROADCAST_FRAMES_RECEIVED	0x18
#define ETH_MIB_MULTICAST_FRAMES_RECEIVED	0x1c
#define ETH_MIB_FRAMES_64_OCTETS		0x20
#define ETH_MIB_FRAMES_65_TO_127_OCTETS		0x24
#define ETH_MIB_FRAMES_128_TO_255_OCTETS	0x28
#define ETH_MIB_FRAMES_256_TO_511_OCTETS	0x2c
#define ETH_MIB_FRAMES_512_TO_1023_OCTETS	0x30
#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS	0x34
#define ETH_MIB_GOOD_OCTETS_SENT_LOW		0x38
#define ETH_MIB_GOOD_OCTETS_SENT_HIGH		0x3c
#define ETH_MIB_GOOD_FRAMES_SENT		0x40
#define ETH_MIB_EXCESSIVE_COLLISION		0x44
#define ETH_MIB_MULTICAST_FRAMES_SENT		0x48
#define ETH_MIB_BROADCAST_FRAMES_SENT		0x4c
#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED	0x50
#define ETH_MIB_FC_SENT				0x54
#define ETH_MIB_GOOD_FC_RECEIVED		0x58
#define ETH_MIB_BAD_FC_RECEIVED			0x5c
#define ETH_MIB_UNDERSIZE_RECEIVED		0x60
#define ETH_MIB_FRAGMENTS_RECEIVED		0x64
#define ETH_MIB_OVERSIZE_RECEIVED		0x68
#define ETH_MIB_JABBER_RECEIVED			0x6c
#define ETH_MIB_MAC_RECEIVE_ERROR		0x70
#define ETH_MIB_BAD_CRC_EVENT			0x74
#define ETH_MIB_COLLISION			0x78
#define ETH_MIB_LATE_COLLISION			0x7c

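/*
 * A minimal sketch (assumption) of reading one MIB counter.  The per-port
 * counter base accessor MV643XX_ETH_MIB_COUNTERS_BASE() and mv_read() are
 * assumed names; the split LOW/HIGH words are combined into the u64 field
 * of struct mv643xx_mib_counters (defined below).
 *
 *	u64 octets;
 *
 *	octets  = mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(port_num) +
 *			  ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);
 *	octets |= (u64)mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(port_num) +
 *			       ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH) << 32;
 *	mp->mib_counters.good_octets_received += octets;
 */
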
/* Port serial status reg (PSR) */
#define ETH_INTERFACE_PCM		0x00000001
#define ETH_LINK_IS_UP			0x00000002
#define ETH_PORT_AT_FULL_DUPLEX		0x00000004
#define ETH_RX_FLOW_CTRL_ENABLED	0x00000008
#define ETH_GMII_SPEED_1000		0x00000010
#define ETH_MII_SPEED_100		0x00000020
#define ETH_TX_IN_PROGRESS		0x00000080
#define ETH_BYPASS_ACTIVE		0x00000100
#define ETH_PORT_AT_PARTITION_STATE	0x00000200
#define ETH_PORT_TX_FIFO_EMPTY		0x00000400

/* SMI reg */
#define ETH_SMI_BUSY		0x10000000	/* operation in progress	*/
#define ETH_SMI_READ_VALID	0x08000000	/* read data is valid		*/
#define ETH_SMI_OPCODE_WRITE	0		/* opcode bit: 0 - write	*/
#define ETH_SMI_OPCODE_READ	0x04000000	/* opcode bit: 1 - read		*/

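/*
 * A minimal sketch (assumption) of how the SMI bits combine with the
 * PHY_WAIT_* constants above for a PHY register read; mv_read()/mv_write()
 * and MV643XX_ETH_SMI_REG are assumed accessor names, and the real code is
 * eth_port_read_smi_reg() in mv643xx_eth.c (declared at the end of this
 * file).
 *
 *	int i;
 *
 *	for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
 *		if (i == PHY_WAIT_ITERATIONS)
 *			return;
 *		udelay(PHY_WAIT_MICRO_SECONDS);
 *	}
 *	mv_write(MV643XX_ETH_SMI_REG,
 *		 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
 *	while (!(mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_READ_VALID))
 *		udelay(PHY_WAIT_MICRO_SECONDS);
 *	*value = mv_read(MV643XX_ETH_SMI_REG) & 0xffff;
 */
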
/* Interrupt Cause Register Bit Definitions */

/* SDMA command status fields macros */

/* Tx & Rx descriptors status */
#define ETH_ERROR_SUMMARY		0x00000001

/* Tx & Rx descriptors command */
#define ETH_BUFFER_OWNED_BY_DMA		0x80000000

/* Tx descriptors status */
#define ETH_LC_ERROR			0
#define ETH_UR_ERROR			0x00000002
#define ETH_RL_ERROR			0x00000004
#define ETH_LLC_SNAP_FORMAT		0x00000200

/* Rx descriptors status */
#define ETH_OVERRUN_ERROR		0x00000002
#define ETH_MAX_FRAME_LENGTH_ERROR	0x00000004
#define ETH_RESOURCE_ERROR		0x00000006
#define ETH_VLAN_TAGGED			0x00080000
#define ETH_BPDU_FRAME			0x00100000
#define ETH_UDP_FRAME_OVER_IP_V_4	0x00200000
#define ETH_OTHER_FRAME_TYPE		0x00400000
#define ETH_LAYER_2_IS_ETH_V_2		0x00800000
#define ETH_FRAME_TYPE_IP_V_4		0x01000000
#define ETH_FRAME_HEADER_OK		0x02000000
#define ETH_RX_LAST_DESC		0x04000000
#define ETH_RX_FIRST_DESC		0x08000000
#define ETH_UNKNOWN_DESTINATION_ADDR	0x10000000
#define ETH_RX_ENABLE_INTERRUPT		0x20000000
#define ETH_LAYER_4_CHECKSUM_OK		0x40000000

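/*
 * A minimal sketch (assumption) of how the Rx cmd_sts bits above are
 * typically tested once the hardware has released a descriptor (i.e. once
 * ETH_BUFFER_OWNED_BY_DMA is clear); "skb" and "stats" are illustrative
 * locals, and the real logic is in the receive path in mv643xx_eth.c.
 *
 *	if ((cmd_sts & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
 *			(ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC) ||
 *	    (cmd_sts & ETH_ERROR_SUMMARY)) {
 *		stats->rx_dropped++;
 *		dev_kfree_skb_irq(skb);
 *	} else if (cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	}
 */
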
/* Rx descriptors byte count */
#define ETH_FRAME_FRAGMENTED		0x00000004

/* Tx descriptors command */
#define ETH_LAYER_4_CHECKSUM_FIRST_DESC	0x00000400
#define ETH_FRAME_SET_TO_VLAN		0x00008000
#define ETH_UDP_FRAME			0x00010000
#define ETH_GEN_TCP_UDP_CHECKSUM	0x00020000
#define ETH_GEN_IP_V_4_CHECKSUM		0x00040000
#define ETH_ZERO_PADDING		0x00080000
#define ETH_TX_LAST_DESC		0x00100000
#define ETH_TX_FIRST_DESC		0x00200000
#define ETH_GEN_CRC			0x00400000
#define ETH_TX_ENABLE_INTERRUPT		0x00800000
#define ETH_AUTO_MODE			0x40000000

#define ETH_TX_IHL_SHIFT		11

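/*
 * A minimal sketch (assumption) of composing cmd_sts for a single-fragment
 * TCP/IPv4 frame that uses the hardware checksum engine; "desc" and the use
 * of ip_hdr(skb)->ihl are illustrative, and the real transmit path is in
 * mv643xx_eth.c.
 *
 *	desc->cmd_sts = ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC |
 *			ETH_GEN_CRC | ETH_ZERO_PADDING |
 *			ETH_TX_ENABLE_INTERRUPT |
 *			ETH_GEN_TCP_UDP_CHECKSUM | ETH_GEN_IP_V_4_CHECKSUM |
 *			(ip_hdr(skb)->ihl << ETH_TX_IHL_SHIFT) |
 *			ETH_BUFFER_OWNED_BY_DMA;
 */
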
/* typedefs */

typedef enum _eth_func_ret_status {
	ETH_OK,			/* Returned as expected.		*/
	ETH_ERROR,		/* Fundamental error.			*/
	ETH_RETRY,		/* Could not process request. Try later.*/
	ETH_END_OF_JOB,		/* Ring has nothing to process.		*/
	ETH_QUEUE_FULL,		/* Ring resource error.			*/
	ETH_QUEUE_LAST_RESOURCE	/* Ring resources about to exhaust.	*/
} ETH_FUNC_RET_STATUS;

typedef enum _eth_target {
	ETH_TARGET_DRAM,
	ETH_TARGET_DEVICE,
	ETH_TARGET_CBS,
	ETH_TARGET_PCI0,
	ETH_TARGET_PCI1
} ETH_TARGET;

/* These are for big-endian machines. Little endian needs different
 * definitions.
 */
#if defined(__BIG_ENDIAN)
struct eth_rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct eth_tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
};

#elif defined(__LITTLE_ENDIAN)
struct eth_rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct eth_tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* Unified struct for Rx and Tx operations. The user is not required to */
/* be familiar with either Tx or Rx descriptors.			 */
struct pkt_info {
	unsigned short byte_cnt;	/* Descriptor buffer byte count	*/
	unsigned short l4i_chk;		/* Tx CPU provided TCP Checksum	*/
	unsigned int cmd_sts;		/* Descriptor command status	*/
	dma_addr_t buf_ptr;		/* Descriptor buffer pointer	*/
	struct sk_buff *return_info;	/* User resource return information */
};

/* Ethernet port specific information */

struct mv643xx_mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

struct mv643xx_private {
	int port_num;			/* User Ethernet port number	*/

	u32 rx_sram_addr;		/* Base address of rx sram area	*/
	u32 rx_sram_size;		/* Size of rx sram area		*/
	u32 tx_sram_addr;		/* Base address of tx sram area	*/
	u32 tx_sram_size;		/* Size of tx sram area		*/

	int rx_resource_err;		/* Rx ring resource error flag	*/

	/* Tx/Rx ring management index fields. For driver use */

	/* Next available and first returning Rx resource */
	int rx_curr_desc_q, rx_used_desc_q;

	/* Next available and first returning Tx resource */
	int tx_curr_desc_q, tx_used_desc_q;

#ifdef MV643XX_TX_FAST_REFILL
	u32 tx_clean_threshold;
#endif

	struct eth_rx_desc *p_rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct eth_tx_desc *p_tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;

	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct napi_struct napi;
	struct net_device_stats stats;
	struct mv643xx_mib_counters mib_counters;
	spinlock_t lock;
	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case the RX ring is empty, which can happen when
	 * the system runs out of resources (skb's)
	 */
	struct timer_list timeout;

	u32 rx_int_coal;
	u32 tx_int_coal;
	struct mii_if_info mii;
};

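/*
 * A minimal sketch (assumption) of how the ring index fields above support
 * reclaiming descriptors the hardware has finished transmitting; the real
 * reclaim code in mv643xx_eth.c also unmaps the DMA buffers.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&mp->lock, flags);
 *	while (mp->tx_desc_count > 0) {
 *		int tx_index = mp->tx_used_desc_q;
 *		struct eth_tx_desc *desc = &mp->p_tx_desc_area[tx_index];
 *
 *		if (desc->cmd_sts & ETH_BUFFER_OWNED_BY_DMA)
 *			break;
 *		mp->tx_used_desc_q = (tx_index + 1) % mp->tx_ring_size;
 *		mp->tx_desc_count--;
 *		if (mp->tx_skb[tx_index]) {
 *			dev_kfree_skb_irq(mp->tx_skb[tx_index]);
 *			mp->tx_skb[tx_index] = NULL;
 *		}
 *	}
 *	spin_unlock_irqrestore(&mp->lock, flags);
 */
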
/* Port operation control routines */
static void eth_port_init(struct mv643xx_private *mp);
static void eth_port_reset(unsigned int eth_port_num);
static void eth_port_start(struct net_device *dev);

/* PHY and MIB routines */
static void ethernet_phy_reset(unsigned int eth_port_num);

static void eth_port_write_smi_reg(unsigned int eth_port_num,
				   unsigned int phy_reg, unsigned int value);

static void eth_port_read_smi_reg(unsigned int eth_port_num,
				  unsigned int phy_reg, unsigned int *value);

static void eth_clear_mib_counters(unsigned int eth_port_num);

/* Port data flow control routines */
static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
					    struct pkt_info *p_pkt_info);
static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
					      struct pkt_info *p_pkt_info);

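/*
 * A minimal sketch (assumption) of refilling one RX descriptor through
 * eth_rx_return_buff() and struct pkt_info.  ETH_RX_SKB_SIZE expects a
 * local "dev", and the NULL device passed to dma_map_single() is
 * illustrative; the real refill loop is in mv643xx_eth.c.
 *
 *	struct pkt_info pkt_info;
 *	struct sk_buff *skb;
 *	int unaligned;
 *
 *	skb = dev_alloc_skb(ETH_RX_SKB_SIZE);
 *	if (skb == NULL)
 *		return;
 *	unaligned = (unsigned long)skb->data & (dma_get_cache_alignment() - 1);
 *	if (unaligned)
 *		skb_reserve(skb, dma_get_cache_alignment() - unaligned);
 *	pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
 *	pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
 *	pkt_info.buf_ptr = dma_map_single(NULL, skb->data, ETH_RX_SKB_SIZE,
 *					  DMA_FROM_DEVICE);
 *	pkt_info.return_info = skb;
 *	if (eth_rx_return_buff(mp, &pkt_info) == ETH_OK) {
 *		mp->rx_desc_count++;
 *		skb_reserve(skb, ETH_HW_IP_ALIGN);
 *	} else {
 *		dev_kfree_skb(skb);
 *	}
 */
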
#endif /* __MV643XX_ETH_H__ */