blob: a804965e654259a3f136e02d0a4aa961b286eef2 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
2/*
3 Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
4
5 Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 Copyright 2001 Manfred Spraul [natsemi.c]
8 Copyright 1999-2001 by Donald Becker. [natsemi.c]
9 Written 1997-2001 by Donald Becker. [8139too.c]
10 Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
11
12 This software may be used and distributed according to the terms of
13 the GNU General Public License (GPL), incorporated herein by reference.
14 Drivers based on or derived from this code fall under the GPL and must
15 retain the authorship, copyright and license notice. This file is not
16 a complete program and may only be used when the entire operating
17 system is licensed under the GPL.
18
19 See the file COPYING in this distribution for more information.
20
21 Contributors:
Jeff Garzikf3b197a2006-05-26 21:39:03 -040022
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
25 LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
Jeff Garzikf3b197a2006-05-26 21:39:03 -040026
Linus Torvalds1da177e2005-04-16 15:20:36 -070027 TODO:
28 * Test Tx checksumming thoroughly
29 * Implement dev->tx_timeout
30
31 Low priority TODO:
32 * Complete reset on PciErr
33 * Consider Rx interrupt mitigation using TimerIntr
34 * Investigate using skb->priority with h/w VLAN priority
35 * Investigate using High Priority Tx Queue with skb->priority
36 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
37 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
38 * Implement Tx software interrupt mitigation via
39 Tx descriptor bit
40 * The real minimum of CP_MIN_MTU is 4 bytes. However,
41 for this to be supported, one must(?) turn on packet padding.
42 * Support external MII transceivers (patch available)
43
44 NOTES:
45 * TX checksumming is considered experimental. It is off by
46 default, use ethtool to turn it on.
47
48 */
49
/* Driver identity strings; referenced by probe messages and ethtool. */
#define DRV_NAME		"8139cp"
#define DRV_VERSION		"1.3"
#define DRV_RELDATE		"Mar 22, 2004"


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* VLAN tagging feature enable/disable.
 * When the 802.1Q layer is configured (built-in or module), the Tx
 * descriptor's opts2 word carries the VLAN tag; otherwise it is zeroed. */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define CP_VLAN_TAG_USED 1
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
#else
#define CP_VLAN_TAG_USED 0
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = 0; } while (0)
#endif

/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

/* Message-enable bitmap; -1 means "use CP_DEF_MSG_ENABLE default". */
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");

#define PFX			DRV_NAME ": "

#ifndef TRUE
#define FALSE 0
#define TRUE (!FALSE)
#endif

/* Default netif_msg_* categories: driver, probe and link messages. */
#define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
				 NETIF_MSG_PROBE 	| \
				 NETIF_MSG_LINK)
#define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
#define CP_REGS_SIZE		(0xff + 1)
#define CP_REGS_VER		1		/* version 1 */
#define CP_RX_RING_SIZE		64
#define CP_TX_RING_SIZE		64
/* One DMA-coherent allocation holds both rings plus the stats block. */
#define CP_RING_BYTES		\
		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
		 CP_STATS_SIZE)
/* Ring indices wrap with a mask; ring sizes must stay powers of two. */
#define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
/* Free Tx slots; one slot is always kept unused to distinguish full/empty. */
#define TX_BUFFS_AVAIL(CP)					\
	(((CP)->tx_tail <= (CP)->tx_head) ?			\
	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :	\
	  (CP)->tx_tail - (CP)->tx_head - 1)

#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
#define RX_OFFSET		2	/* Reserve to align IP header */
#define CP_INTERNAL_PHY		32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
#define CP_MAX_MTU		4096
151
/* Hardware register offsets and bit definitions for the RTL-8139C+.
 * NOTE(review): several bit names share positions because they apply to
 * different contexts (e.g. UDPCS/TxVlanTag/PID1 are all bit 17 — Tx
 * descriptor vs. Rx descriptor meaning); values come from the datasheet
 * and must not be "deduplicated". */
enum {
	/* NIC register offsets */
	MAC0		= 0x00,	/* Ethernet hardware address. */
	MAR0		= 0x08,	/* Multicast filter. */
	StatsAddr	= 0x10,	/* 64-bit start addr of 64-byte DMA stats blk */
	TxRingAddr	= 0x20, /* 64-bit start addr of Tx ring */
	HiTxRingAddr	= 0x28, /* 64-bit start addr of high priority Tx ring */
	Cmd		= 0x37, /* Command register */
	IntrMask	= 0x3C, /* Interrupt mask */
	IntrStatus	= 0x3E, /* Interrupt status */
	TxConfig	= 0x40, /* Tx configuration */
	ChipVersion	= 0x43, /* 8-bit chip version, inside TxConfig */
	RxConfig	= 0x44, /* Rx configuration */
	RxMissed	= 0x4C,	/* 24 bits valid, write clears */
	Cfg9346		= 0x50, /* EEPROM select/control; Cfg reg [un]lock */
	Config1		= 0x52, /* Config1 */
	Config3		= 0x59, /* Config3 */
	Config4		= 0x5A, /* Config4 */
	MultiIntr	= 0x5C, /* Multiple interrupt select */
	BasicModeCtrl	= 0x62,	/* MII BMCR */
	BasicModeStatus	= 0x64, /* MII BMSR */
	NWayAdvert	= 0x66, /* MII ADVERTISE */
	NWayLPAR	= 0x68, /* MII LPA */
	NWayExpansion	= 0x6A, /* MII Expansion */
	Config5		= 0xD8,	/* Config5 */
	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
	CpCmd		= 0xE0, /* C+ Command register (C+ mode only) */
	IntrMitigate	= 0xE2,	/* rx/tx interrupt mitigation control */
	RxRingAddr	= 0xE4, /* 64-bit start addr of Rx ring */
	TxThresh	= 0xEC, /* Early Tx threshold */
	OldRxBufAddr	= 0x30, /* DMA address of Rx ring buffer (C mode) */
	OldTSD0		= 0x10, /* DMA address of first Tx desc (C mode) */

	/* Tx and Rx status descriptors */
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */
	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
	MSSShift	= 16,	     /* MSS value position */
	MSSMask		= 0xfff,     /* MSS value: 11 bits */
	TxError		= (1 << 23), /* Tx error summary */
	RxError		= (1 << 20), /* Rx error summary */
	IPCS		= (1 << 18), /* Calculate IP checksum */
	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
	TxVlanTag	= (1 << 17), /* Add VLAN tag */
	RxVlanTagged	= (1 << 16), /* Rx VLAN tag available */
	IPFail		= (1 << 15), /* IP checksum failed */
	UDPFail		= (1 << 14), /* UDP/IP checksum failed */
	TCPFail		= (1 << 13), /* TCP/IP checksum failed */
	NormalTxPoll	= (1 << 6),  /* One or more normal Tx packets to send */
	PID1		= (1 << 17), /* 2 protocol id bits:  0==non-IP, */
	PID0		= (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
	RxProtoTCP	= 1,	     /* decoded PID1:PID0 values */
	RxProtoUDP	= 2,
	RxProtoIP	= 3,
	TxFIFOUnder	= (1 << 25), /* Tx FIFO underrun */
	TxOWC		= (1 << 22), /* Tx Out-of-window collision */
	TxLinkFail	= (1 << 21), /* Link failed during Tx of packet */
	TxMaxCol	= (1 << 20), /* Tx aborted due to excessive collisions */
	TxColCntShift	= 16,	     /* Shift, to get 4-bit Tx collision cnt */
	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
	RxErrFrame	= (1 << 27), /* Rx frame alignment error */
	RxMcast		= (1 << 26), /* Rx multicast packet rcv'd */
	RxErrCRC	= (1 << 18), /* Rx CRC error */
	RxErrRunt	= (1 << 19), /* Rx error, packet < 64 bytes */
	RxErrLong	= (1 << 21), /* Rx error, packet > 4096 bytes */
	RxErrFIFO	= (1 << 22), /* Rx error, FIFO overflowed, pkt bad */

	/* StatsAddr register */
	DumpStats	= (1 << 3),  /* Begin stats dump */

	/* RxConfig register */
	RxCfgFIFOShift	= 13,	     /* Shift, to get Rx FIFO thresh value */
	RxCfgDMAShift	= 8,	     /* Shift, to get Rx Max DMA value */
	AcceptErr	= 0x20,	     /* Accept packets with CRC errors */
	AcceptRunt	= 0x10,	     /* Accept runt (<64 bytes) packets */
	AcceptBroadcast	= 0x08,	     /* Accept broadcast packets */
	AcceptMulticast	= 0x04,	     /* Accept multicast packets */
	AcceptMyPhys	= 0x02,	     /* Accept pkts with our MAC as dest */
	AcceptAllPhys	= 0x01,	     /* Accept all pkts w/ physical dest */

	/* IntrMask / IntrStatus registers */
	PciErr		= (1 << 15), /* System error on the PCI bus */
	TimerIntr	= (1 << 14), /* Asserted when TCTR reaches TimerInt value */
	LenChg		= (1 << 13), /* Cable length change */
	SWInt		= (1 << 8),  /* Software-requested interrupt */
	TxEmpty		= (1 << 7),  /* No Tx descriptors available */
	RxFIFOOvr	= (1 << 6),  /* Rx FIFO Overflow */
	LinkChg		= (1 << 5),  /* Packet underrun, or link change */
	RxEmpty		= (1 << 4),  /* No Rx descriptors available */
	TxErr		= (1 << 3),  /* Tx error */
	TxOK		= (1 << 2),  /* Tx packet sent */
	RxErr		= (1 << 1),  /* Rx error */
	RxOK		= (1 << 0),  /* Rx packet received */
	IntrResvd	= (1 << 10), /* reserved, according to RealTek engineers,
					but hardware likes to raise it */

	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
			  RxErr | RxOK | IntrResvd,

	/* C mode command register */
	CmdReset	= (1 << 4),  /* Enable to reset; self-clearing */
	RxOn		= (1 << 3),  /* Rx mode enable */
	TxOn		= (1 << 2),  /* Tx mode enable */

	/* C+ mode command register */
	RxVlanOn	= (1 << 6),  /* Rx VLAN de-tagging enable */
	RxChkSum	= (1 << 5),  /* Rx checksum offload enable */
	PCIDAC		= (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
	PCIMulRW	= (1 << 3),  /* Enable PCI read/write multiple */
	CpRxOn		= (1 << 1),  /* Rx mode enable */
	CpTxOn		= (1 << 0),  /* Tx mode enable */

	/* Cfg9346 EEPROM control register */
	Cfg9346_Lock	= 0x00,	     /* Lock ConfigX/MII register access */
	Cfg9346_Unlock	= 0xC0,	     /* Unlock ConfigX/MII register access */

	/* TxConfig register */
	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
	TxDMAShift	= 8,	     /* DMA burst value (0-7) is shift this many bits */

	/* Early Tx Threshold register */
	TxThreshMask	= 0x3f,	     /* Mask bits 5-0 */
	TxThreshMax	= 2048,	     /* Max early Tx threshold */

	/* Config1 register */
	DriverLoaded	= (1 << 5),  /* Software marker, driver is loaded */
	LWACT		= (1 << 4),  /* LWAKE active mode */
	PMEnable	= (1 << 0),  /* Enable various PM features of chip */

	/* Config3 register */
	PARMEnable	= (1 << 6),  /* Enable auto-loading of PHY parms */
	MagicPacket	= (1 << 5),  /* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),  /* Wake up when the cable connection is re-established */

	/* Config4 register */
	LWPTN		= (1 << 1),  /* LWAKE Pattern */
	LWPME		= (1 << 4),  /* LANWAKE vs PMEB */

	/* Config5 register */
	BWF		= (1 << 6),  /* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),  /* Accept Multicast wakeup frame */
	UWF		= (1 << 4),  /* Accept Unicast wakeup frame */
	LANWake		= (1 << 1),  /* Enable LANWake signal */
	PMEStatus	= (1 << 0),  /* PME status can be reset by PCI RST# */

	/* Interrupt masks used while NAPI polling is active (rx bits
	 * masked off) and the full operating mask. */
	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
	cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
	cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};
306
/* RxConfig value programmed at init: FIFO threshold and max DMA burst. */
static const unsigned int cp_rx_config =
	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
	  (RX_DMA_BURST << RxCfgDMAShift);

/* Hardware Tx/Rx descriptor.  Fields are stored little-endian: the code
 * always converts through cpu_to_le32/le64 when writing and
 * le32_to_cpu/le64 when reading (see cp_rx_poll/cp_tx). */
struct cp_desc {
	u32		opts1;	/* DescOwn/RingEnd/flags + length */
	u32		opts2;	/* VLAN tag (Tx) / VLAN info (Rx) */
	u64		addr;	/* DMA address of the data buffer */
};
316
/* Layout of the 64-byte statistics block the chip DMAs to host memory
 * when DumpStats is written to StatsAddr.  The field order and the
 * packed attribute mirror the hardware layout — do not reorder. */
struct cp_dma_stats {
	u64			tx_ok;
	u64			rx_ok;
	u64			tx_err;
	u32			rx_err;
	u16			rx_fifo;
	u16			frame_align;
	u32			tx_ok_1col;
	u32			tx_ok_mcol;
	u64			rx_ok_phys;
	u64			rx_ok_bcast;
	u32			rx_ok_mcast;
	u16			tx_abort;
	u16			tx_underrun;
} __attribute__((packed));

/* Driver-maintained counters not provided by the hardware stats dump. */
struct cp_extra_stats {
	unsigned long		rx_frags;	/* fragmented Rx frames dropped */
};
336
/* Per-adapter state, hung off the net_device via netdev_priv().
 * The rx_* and tx_* groups are each ____cacheline_aligned so the
 * receive and transmit hot paths do not share cache lines. */
struct cp_private {
	void			__iomem *regs;	/* mapped MMIO register window */
	struct net_device	*dev;
	spinlock_t		lock;		/* guards chip access + rings */
	u32			msg_enable;	/* netif_msg_* bitmap */

	struct pci_dev		*pdev;
	u32			rx_config;	/* shadow of RxConfig register */
	u16			cpcmd;		/* shadow of CpCmd register */

	struct net_device_stats net_stats;
	struct cp_extra_stats	cp_stats;

	unsigned		rx_head		____cacheline_aligned;
	unsigned		rx_tail;	/* next Rx slot to reap */
	struct cp_desc		*rx_ring;
	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];

	unsigned		tx_head		____cacheline_aligned;
	unsigned		tx_tail;	/* next Tx slot to reclaim */
	struct cp_desc		*tx_ring;
	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];

	unsigned		rx_buf_sz;	/* current Rx buffer size */
	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */

#if CP_VLAN_TAG_USED
	struct vlan_group	*vlgrp;
#endif
	dma_addr_t		ring_dma;	/* DMA addr of combined ring alloc */

	struct mii_if_info	mii_if;
};
370
/* MMIO register accessors.  These expand relative to a local variable
 * named `cp`, so they may only be used where `struct cp_private *cp`
 * is in scope.  The *_f ("flush") variants read the register back
 * immediately to force the posted PCI write to complete. */
#define cpr8(reg)	readb(cp->regs + (reg))
#define cpr16(reg)	readw(cp->regs + (reg))
#define cpr32(reg)	readl(cp->regs + (reg))
#define cpw8(reg,val)	writeb((val), cp->regs + (reg))
#define cpw16(reg,val)	writew((val), cp->regs + (reg))
#define cpw32(reg,val)	writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {			\
	writeb((val), cp->regs + (reg));	\
	readb(cp->regs + (reg));		\
	} while (0)
#define cpw16_f(reg,val) do {			\
	writew((val), cp->regs + (reg));	\
	readw(cp->regs + (reg));		\
	} while (0)
#define cpw32_f(reg,val) do {			\
	writel((val), cp->regs + (reg));	\
	readl(cp->regs + (reg));		\
	} while (0)
389
390
/* Forward declarations for routines defined later in this file. */
static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402
403static struct pci_device_id cp_pci_tbl[] = {
Francois Romieucccb20d2006-08-16 13:07:18 +0200404 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
405 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406 { },
407};
408MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
409
/* Names reported for ethtool -S; order must match the field order of
 * struct cp_dma_stats followed by struct cp_extra_stats (CP_NUM_STATS
 * entries total). */
static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "tx_ok" },
	{ "rx_ok" },
	{ "tx_err" },
	{ "rx_err" },
	{ "rx_fifo" },
	{ "frame_align" },
	{ "tx_ok_1col" },
	{ "tx_ok_mcol" },
	{ "rx_ok_phys" },
	{ "rx_ok_bcast" },
	{ "rx_ok_mcast" },
	{ "tx_abort" },
	{ "tx_underrun" },
	{ "rx_frags" },
};
428
429
#if CP_VLAN_TAG_USED
/* VLAN layer callback: attach/detach a vlan_group and flip hardware
 * VLAN de-tagging (RxVlanOn in CpCmd) to match, under the device lock. */
static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cp->vlgrp = grp;
	cp->cpcmd = grp ? (cp->cpcmd | RxVlanOn) : (cp->cpcmd & ~RxVlanOn);
	cpw16(CpCmd, cp->cpcmd);
	spin_unlock_irqrestore(&cp->lock, flags);
}
#endif /* CP_VLAN_TAG_USED */
447
448static inline void cp_set_rxbufsize (struct cp_private *cp)
449{
450 unsigned int mtu = cp->dev->mtu;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400451
Linus Torvalds1da177e2005-04-16 15:20:36 -0700452 if (mtu > ETH_DATA_LEN)
453 /* MTU + ethernet header + FCS + optional VLAN tag */
454 cp->rx_buf_sz = mtu + ETH_HLEN + 8;
455 else
456 cp->rx_buf_sz = PKT_BUF_SZ;
457}
458
/* Hand one completed Rx skb up the stack: set the protocol, bump the
 * byte/packet counters, and deliver via the VLAN acceleration path when
 * the descriptor carries a tag, else via netif_receive_skb().
 * NOTE(review): the tag extraction applies be16_to_cpu to a host-order
 * masked value of opts2; looks endianness-suspicious on big-endian —
 * confirm against the C+ descriptor format before relying on it. */
static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
			      struct cp_desc *desc)
{
	skb->protocol = eth_type_trans (skb, cp->dev);

	cp->net_stats.rx_packets++;
	cp->net_stats.rx_bytes += skb->len;
	cp->dev->last_rx = jiffies;

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
		vlan_hwaccel_receive_skb(skb, cp->vlgrp,
					 be16_to_cpu(desc->opts2 & 0xffff));
	} else
#endif
		netif_receive_skb(skb);
}
476
477static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
478 u32 status, u32 len)
479{
480 if (netif_msg_rx_err (cp))
481 printk (KERN_DEBUG
482 "%s: rx err, slot %d status 0x%x len %d\n",
483 cp->dev->name, rx_tail, status, len);
484 cp->net_stats.rx_errors++;
485 if (status & RxErrFrame)
486 cp->net_stats.rx_frame_errors++;
487 if (status & RxErrCRC)
488 cp->net_stats.rx_crc_errors++;
489 if ((status & RxErrRunt) || (status & RxErrLong))
490 cp->net_stats.rx_length_errors++;
491 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
492 cp->net_stats.rx_length_errors++;
493 if (status & RxErrFIFO)
494 cp->net_stats.rx_fifo_errors++;
495}
496
497static inline unsigned int cp_rx_csum_ok (u32 status)
498{
499 unsigned int protocol = (status >> 16) & 0x3;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400500
Linus Torvalds1da177e2005-04-16 15:20:36 -0700501 if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
502 return 1;
503 else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
504 return 1;
505 else if ((protocol == RxProtoIP) && (!(status & IPFail)))
506 return 1;
507 return 0;
508}
509
/* Old-style NAPI poll handler (dev->poll).  Reaps completed Rx
 * descriptors up to dev->quota, refills each slot with a fresh skb,
 * and returns 0 (done, interrupts re-armed) or 1 (more work remains).
 * Called with Rx interrupts masked by cp_interrupt(). */
static int cp_rx_poll (struct net_device *dev, int *budget)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned rx_tail = cp->rx_tail;
	unsigned rx_work = dev->quota;
	unsigned rx;

rx_status_loop:
	rx = 0;
	/* ack the rx interrupt causes we are about to service */
	cpw16(IntrStatus, cp_rx_intr_mask);

	while (1) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *new_skb;
		struct cp_desc *desc;
		unsigned buflen;

		skb = cp->rx_skb[rx_tail];
		BUG_ON(!skb);

		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)
			break;	/* NIC still owns this slot; ring drained */

		/* low 13 bits are the frame length incl. 4-byte FCS */
		len = (status & 0x1fff) - 4;
		mapping = le64_to_cpu(desc->addr);

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			/* we don't support incoming fragmented frames.
			 * instead, we attempt to ensure that the
			 * pre-allocated RX skbs are properly sized such
			 * that RX fragments are never encountered
			 */
			cp_rx_err_acct(cp, rx_tail, status, len);
			cp->net_stats.rx_dropped++;
			cp->cp_stats.rx_frags++;
			goto rx_next;
		}

		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		if (netif_msg_rx_status(cp))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
			       dev->name, rx_tail, status, len);

		buflen = cp->rx_buf_sz + RX_OFFSET;
		/* allocate the replacement skb BEFORE unmapping; on
		 * failure the old skb stays in the ring and the frame
		 * is simply dropped */
		new_skb = dev_alloc_skb (buflen);
		if (!new_skb) {
			cp->net_stats.rx_dropped++;
			goto rx_next;
		}

		skb_reserve(new_skb, RX_OFFSET);

		pci_unmap_single(cp->pdev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb_put(skb, len);

		/* `mapping` now refers to the replacement buffer and is
		 * written back into the descriptor at rx_next below */
		mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
					 PCI_DMA_FROMDEVICE);
		cp->rx_skb[rx_tail] = new_skb;

		cp_rx_skb(cp, skb, desc);
		rx++;

rx_next:
		/* return the slot to the NIC; the last ring entry also
		 * carries RingEnd so the chip wraps */
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);

		if (!rx_work--)
			break;
	}

	cp->rx_tail = rx_tail;

	dev->quota -= rx;
	*budget -= rx;

	/* if we did not reach work limit, then we're done with
	 * this round of polling
	 */
	if (rx_work) {
		unsigned long flags;

		/* re-check status before re-arming: a frame may have
		 * landed between draining the ring and completing */
		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;

		/* irqs must be off across __netif_rx_complete + unmask
		 * to avoid racing a concurrent interrupt */
		local_irq_save(flags);
		cpw16_f(IntrMask, cp_intr_mask);
		__netif_rx_complete(dev);
		local_irq_restore(flags);

		return 0;	/* done */
	}

	return 1;		/* not done */
}
625
/* Interrupt handler.  Acks non-Rx causes immediately, defers Rx work to
 * the NAPI poll routine (masking Rx interrupts until polling finishes),
 * reclaims Tx descriptors inline, and reports PCI bus errors. */
static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp;
	u16 status;

	if (unlikely(dev == NULL))
		return IRQ_NONE;
	cp = netdev_priv(dev);

	status = cpr16(IntrStatus);
	/* 0xFFFF means the device is gone (e.g. hot-unplugged) */
	if (!status || (status == 0xFFFF))
		return IRQ_NONE;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
		        dev->name, status, cpr8(Cmd), cpr16(CpCmd));

	/* ack everything except the rx causes, which the poll routine
	 * acks itself before servicing */
	cpw16(IntrStatus, status & ~cp_rx_intr_mask);

	spin_lock(&cp->lock);

	/* close possible race's with dev_close */
	if (unlikely(!netif_running(dev))) {
		cpw16(IntrMask, 0);
		spin_unlock(&cp->lock);
		return IRQ_HANDLED;
	}

	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
		if (netif_rx_schedule_prep(dev)) {
			/* mask rx interrupts until the poll completes */
			cpw16_f(IntrMask, cp_norx_intr_mask);
			__netif_rx_schedule(dev);
		}

	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		cp_tx(cp);
	if (status & LinkChg)
		mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);

	spin_unlock(&cp->lock);

	if (status & PciErr) {
		u16 pci_status;

		/* write-back clears the error bits in PCI_STATUS */
		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
		printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
		       dev->name, status, pci_status);

		/* TODO: reset hardware */
	}

	return IRQ_HANDLED;
}
681
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 * Simply runs the interrupt handler with the device IRQ masked.
 */
static void cp_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	cp_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
694
/* Reclaim completed Tx descriptors between tx_tail and tx_head: unmap
 * each buffer, account errors/stats on the final fragment of a packet,
 * free the skb, and wake the queue when enough slots are free again.
 * Called from the interrupt handler with cp->lock held. */
static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;

	while (tx_tail != tx_head) {
		struct cp_desc *txd = cp->tx_ring + tx_tail;
		struct sk_buff *skb;
		u32 status;

		/* order the DescOwn read against later descriptor reads */
		rmb();
		status = le32_to_cpu(txd->opts1);
		if (status & DescOwn)
			break;	/* NIC has not finished this one yet */

		skb = cp->tx_skb[tx_tail];
		BUG_ON(!skb);

		/* low 16 bits of opts1 hold the mapped buffer length */
		pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
				 le32_to_cpu(txd->opts1) & 0xffff,
				 PCI_DMA_TODEVICE);

		/* stats and skb release happen only on the last
		 * fragment; earlier fragments share the same skb */
		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				if (netif_msg_tx_err(cp))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       cp->dev->name, status);
				cp->net_stats.tx_errors++;
				if (status & TxOWC)
					cp->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					cp->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					cp->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					cp->net_stats.tx_fifo_errors++;
			} else {
				cp->net_stats.collisions +=
					((status >> TxColCntShift) & TxColCntMask);
				cp->net_stats.tx_packets++;
				cp->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(cp))
					printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

		cp->tx_skb[tx_tail] = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	cp->tx_tail = tx_tail;

	/* wake the queue once a max-fragment packet is guaranteed to fit */
	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(cp->dev);
}
752
753static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
754{
755 struct cp_private *cp = netdev_priv(dev);
756 unsigned entry;
Jeff Garzikfcec3452005-05-12 19:28:49 -0400757 u32 eor, flags;
Chris Lalancette553af562007-01-16 16:41:44 -0500758 unsigned long intr_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700759#if CP_VLAN_TAG_USED
760 u32 vlan_tag = 0;
761#endif
Jeff Garzikfcec3452005-05-12 19:28:49 -0400762 int mss = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700763
Chris Lalancette553af562007-01-16 16:41:44 -0500764 spin_lock_irqsave(&cp->lock, intr_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700765
766 /* This is a hard error, log it. */
767 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
768 netif_stop_queue(dev);
Chris Lalancette553af562007-01-16 16:41:44 -0500769 spin_unlock_irqrestore(&cp->lock, intr_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700770 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
771 dev->name);
772 return 1;
773 }
774
775#if CP_VLAN_TAG_USED
776 if (cp->vlgrp && vlan_tx_tag_present(skb))
777 vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
778#endif
779
780 entry = cp->tx_head;
781 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
Jeff Garzikfcec3452005-05-12 19:28:49 -0400782 if (dev->features & NETIF_F_TSO)
Herbert Xu79671682006-06-22 02:40:14 -0700783 mss = skb_shinfo(skb)->gso_size;
Jeff Garzikfcec3452005-05-12 19:28:49 -0400784
Linus Torvalds1da177e2005-04-16 15:20:36 -0700785 if (skb_shinfo(skb)->nr_frags == 0) {
786 struct cp_desc *txd = &cp->tx_ring[entry];
787 u32 len;
788 dma_addr_t mapping;
789
790 len = skb->len;
791 mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
792 CP_VLAN_TX_TAG(txd, vlan_tag);
793 txd->addr = cpu_to_le64(mapping);
794 wmb();
795
Jeff Garzikfcec3452005-05-12 19:28:49 -0400796 flags = eor | len | DescOwn | FirstFrag | LastFrag;
797
798 if (mss)
799 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
Patrick McHardy84fa7932006-08-29 16:44:56 -0700800 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700801 const struct iphdr *ip = ip_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700802 if (ip->protocol == IPPROTO_TCP)
Jeff Garzikfcec3452005-05-12 19:28:49 -0400803 flags |= IPCS | TCPCS;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804 else if (ip->protocol == IPPROTO_UDP)
Jeff Garzikfcec3452005-05-12 19:28:49 -0400805 flags |= IPCS | UDPCS;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700806 else
Francois Romieu57344182005-05-12 19:31:31 -0400807 WARN_ON(1); /* we need a WARN() */
Jeff Garzikfcec3452005-05-12 19:28:49 -0400808 }
809
810 txd->opts1 = cpu_to_le32(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700811 wmb();
812
Francois Romieu48907e32006-09-10 23:33:44 +0200813 cp->tx_skb[entry] = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700814 entry = NEXT_TX(entry);
815 } else {
816 struct cp_desc *txd;
817 u32 first_len, first_eor;
818 dma_addr_t first_mapping;
819 int frag, first_entry = entry;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700820 const struct iphdr *ip = ip_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700821
822 /* We must give this initial chunk to the device last.
823 * Otherwise we could race with the device.
824 */
825 first_eor = eor;
826 first_len = skb_headlen(skb);
827 first_mapping = pci_map_single(cp->pdev, skb->data,
828 first_len, PCI_DMA_TODEVICE);
Francois Romieu48907e32006-09-10 23:33:44 +0200829 cp->tx_skb[entry] = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 entry = NEXT_TX(entry);
831
832 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
833 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
834 u32 len;
835 u32 ctrl;
836 dma_addr_t mapping;
837
838 len = this_frag->size;
839 mapping = pci_map_single(cp->pdev,
840 ((void *) page_address(this_frag->page) +
841 this_frag->page_offset),
842 len, PCI_DMA_TODEVICE);
843 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
844
Jeff Garzikfcec3452005-05-12 19:28:49 -0400845 ctrl = eor | len | DescOwn;
846
847 if (mss)
848 ctrl |= LargeSend |
849 ((mss & MSSMask) << MSSShift);
Patrick McHardy84fa7932006-08-29 16:44:56 -0700850 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700851 if (ip->protocol == IPPROTO_TCP)
Jeff Garzikfcec3452005-05-12 19:28:49 -0400852 ctrl |= IPCS | TCPCS;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700853 else if (ip->protocol == IPPROTO_UDP)
Jeff Garzikfcec3452005-05-12 19:28:49 -0400854 ctrl |= IPCS | UDPCS;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700855 else
856 BUG();
Jeff Garzikfcec3452005-05-12 19:28:49 -0400857 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700858
859 if (frag == skb_shinfo(skb)->nr_frags - 1)
860 ctrl |= LastFrag;
861
862 txd = &cp->tx_ring[entry];
863 CP_VLAN_TX_TAG(txd, vlan_tag);
864 txd->addr = cpu_to_le64(mapping);
865 wmb();
866
867 txd->opts1 = cpu_to_le32(ctrl);
868 wmb();
869
Francois Romieu48907e32006-09-10 23:33:44 +0200870 cp->tx_skb[entry] = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871 entry = NEXT_TX(entry);
872 }
873
874 txd = &cp->tx_ring[first_entry];
875 CP_VLAN_TX_TAG(txd, vlan_tag);
876 txd->addr = cpu_to_le64(first_mapping);
877 wmb();
878
Patrick McHardy84fa7932006-08-29 16:44:56 -0700879 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700880 if (ip->protocol == IPPROTO_TCP)
881 txd->opts1 = cpu_to_le32(first_eor | first_len |
882 FirstFrag | DescOwn |
883 IPCS | TCPCS);
884 else if (ip->protocol == IPPROTO_UDP)
885 txd->opts1 = cpu_to_le32(first_eor | first_len |
886 FirstFrag | DescOwn |
887 IPCS | UDPCS);
888 else
889 BUG();
890 } else
891 txd->opts1 = cpu_to_le32(first_eor | first_len |
892 FirstFrag | DescOwn);
893 wmb();
894 }
895 cp->tx_head = entry;
896 if (netif_msg_tx_queued(cp))
897 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
898 dev->name, entry, skb->len);
899 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
900 netif_stop_queue(dev);
901
Chris Lalancette553af562007-01-16 16:41:44 -0500902 spin_unlock_irqrestore(&cp->lock, intr_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903
904 cpw8(TxPoll, NormalTxPoll);
905 dev->trans_start = jiffies;
906
907 return 0;
908}
909
910/* Set or clear the multicast filter for this adaptor.
911 This routine is not state sensitive and need not be SMP locked. */
912
/* Program the chip's receive filter (promiscuous / all-multicast /
 * 64-bit multicast hash) from dev->flags and the device multicast
 * list.  Caller must hold cp->lock (or otherwise exclude the ISR). */
static void __cp_set_rx_mode (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp;

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			/* Top 6 CRC bits select one of 64 hash-table bits. */
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* We can safely update without stopping the chip. */
	tmp = cp_rx_config | rx_mode;
	if (cp->rx_config != tmp) {
		cpw32_f (RxConfig, tmp);
		cp->rx_config = tmp;
	}
	cpw32_f (MAR0 + 0, mc_filter[0]);
	cpw32_f (MAR0 + 4, mc_filter[1]);
}
954
955static void cp_set_rx_mode (struct net_device *dev)
956{
957 unsigned long flags;
958 struct cp_private *cp = netdev_priv(dev);
959
960 spin_lock_irqsave (&cp->lock, flags);
961 __cp_set_rx_mode(dev);
962 spin_unlock_irqrestore (&cp->lock, flags);
963}
964
965static void __cp_get_stats(struct cp_private *cp)
966{
967 /* only lower 24 bits valid; write any value to clear */
968 cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
969 cpw32 (RxMissed, 0);
970}
971
972static struct net_device_stats *cp_get_stats(struct net_device *dev)
973{
974 struct cp_private *cp = netdev_priv(dev);
975 unsigned long flags;
976
977 /* The chip only need report frame silently dropped. */
978 spin_lock_irqsave(&cp->lock, flags);
979 if (netif_running(dev) && netif_device_present(dev))
980 __cp_get_stats(cp);
981 spin_unlock_irqrestore(&cp->lock, flags);
982
983 return &cp->net_stats;
984}
985
/* Quiesce the NIC: clear interrupt status, mask all interrupts, stop
 * the rx/tx engines, and reset the driver's software ring indices.
 * Register write order follows the original driver — do not reorder. */
static void cp_stop_hw (struct cp_private *cp)
{
	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
	cpw16_f(IntrMask, 0);
	cpw8(Cmd, 0);
	cpw16_f(CpCmd, 0);
	/* clear status once more after the engines are stopped */
	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}
997
998static void cp_reset_hw (struct cp_private *cp)
999{
1000 unsigned work = 1000;
1001
1002 cpw8(Cmd, CmdReset);
1003
1004 while (work--) {
1005 if (!(cpr8(Cmd) & CmdReset))
1006 return;
1007
Nishanth Aravamudan3173c892005-09-11 02:09:55 -07001008 schedule_timeout_uninterruptible(10);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009 }
1010
1011 printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
1012}
1013
/* Restore the command-register configuration and re-enable the rx/tx
 * engines.  Counterpart of cp_stop_hw(); interrupt masks untouched. */
static inline void cp_start_hw (struct cp_private *cp)
{
	cpw16(CpCmd, cp->cpcmd);
	cpw8(Cmd, RxOn | TxOn);
}
1019
/* Full hardware (re)initialization: reset the chip, reload the MAC
 * address, start the engines, program rx filter / tx config / ring
 * DMA addresses, and finally unmask interrupts.  The sequence is
 * order-sensitive; rings must already be allocated (cp->ring_dma). */
static void cp_init_hw (struct cp_private *cp)
{
	struct net_device *dev = cp->dev;
	dma_addr_t ring_dma;

	cp_reset_hw(cp);

	cpw8_f (Cfg9346, Cfg9346_Unlock);

	/* Restore our idea of the MAC address. */
	/* NOTE(review): the cpu_to_le32 on a host-order load looks
	 * endian-suspect on big-endian machines — confirm against the
	 * mainline fix which uses le32_to_cpu on a __le32 load. */
	cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
	cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));

	cp_start_hw(cp);
	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

	__cp_set_rx_mode(dev);
	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
	cpw8(Config3, PARMEnable);
	cp->wol_enabled = 0;

	cpw8(Config5, cpr8(Config5) & PMEStatus);

	/* High-priority tx ring unused; zero its address. */
	cpw32_f(HiTxRingAddr, 0);
	cpw32_f(HiTxRingAddr + 4, 0);

	/* Rx ring sits at the start of the DMA block, tx ring follows. */
	ring_dma = cp->ring_dma;
	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);

	cpw16(MultiIntr, 0);

	cpw16_f(IntrMask, cp_intr_mask);

	cpw8_f(Cfg9346, Cfg9346_Lock);
}
1063
/* Populate every rx descriptor with a freshly allocated, DMA-mapped
 * skb and hand ownership to the chip (DescOwn).  On allocation
 * failure all rings are cleaned and -ENOMEM is returned. */
static int cp_refill_rx (struct cp_private *cp)
{
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
		if (!skb)
			goto err_out;

		/* Reserve headroom so the IP header lands aligned. */
		skb_reserve(skb, RX_OFFSET);

		/* NOTE(review): pci_map_single() result is not checked
		 * for mapping errors — confirm acceptable for this era. */
		mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);
		cp->rx_skb[i] = skb;

		cp->rx_ring[i].opts2 = 0;
		cp->rx_ring[i].addr = cpu_to_le64(mapping);
		/* Last descriptor also carries the RingEnd marker. */
		if (i == (CP_RX_RING_SIZE - 1))
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
		else
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | cp->rx_buf_sz);
	}

	return 0;

err_out:
	cp_clean_rings(cp);
	return -ENOMEM;
}
1098
Francois Romieu576cfa92006-02-27 23:15:06 +01001099static void cp_init_rings_index (struct cp_private *cp)
1100{
1101 cp->rx_tail = 0;
1102 cp->tx_head = cp->tx_tail = 0;
1103}
1104
/* Initialize ring contents and indices: zero the tx ring, mark its
 * final descriptor end-of-ring, then fill the rx ring with buffers.
 * Returns 0 or -ENOMEM from cp_refill_rx(). */
static int cp_init_rings (struct cp_private *cp)
{
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

	cp_init_rings_index(cp);

	return cp_refill_rx (cp);
}
1114
/* Allocate one coherent DMA block holding both descriptor rings:
 * rx ring first, tx ring immediately after.  Returns 0 or -ENOMEM. */
static int cp_alloc_rings (struct cp_private *cp)
{
	void *mem;

	mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
	if (!mem)
		return -ENOMEM;

	cp->rx_ring = mem;
	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

	return cp_init_rings(cp);
}
1128
/* Release every buffer attached to the rings: unmap and free rx skbs,
 * then drop any tx packets still in flight, and finally zero both
 * descriptor rings and skb shadow arrays. */
static void cp_clean_rings (struct cp_private *cp)
{
	struct cp_desc *desc;
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		if (cp->rx_skb[i]) {
			desc = cp->rx_ring + i;
			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(cp->rx_skb[i]);
		}
	}

	for (i = 0; i < CP_TX_RING_SIZE; i++) {
		if (cp->tx_skb[i]) {
			struct sk_buff *skb = cp->tx_skb[i];

			/* Each fragment is unmapped individually (length is
			 * the low 16 bits of opts1); the skb itself is freed
			 * once, at its LastFrag descriptor. */
			desc = cp->tx_ring + i;
			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
					 le32_to_cpu(desc->opts1) & 0xffff,
					 PCI_DMA_TODEVICE);
			if (le32_to_cpu(desc->opts1) & LastFrag)
				dev_kfree_skb(skb);
			/* NOTE(review): tx_dropped counts per-descriptor, so
			 * multi-fragment skbs are counted more than once. */
			cp->net_stats.tx_dropped++;
		}
	}

	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}
1163
/* Tear down the rings completely: free all attached buffers, return
 * the coherent DMA block, and clear the ring pointers. */
static void cp_free_rings (struct cp_private *cp)
{
	cp_clean_rings(cp);
	pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
	cp->rx_ring = NULL;
	cp->tx_ring = NULL;
}
1171
/* net_device open hook: allocate DMA rings, program the chip, hook
 * the (shared) interrupt line, then start the tx queue.
 * Returns 0 or a negative errno; on IRQ failure the hardware is
 * stopped and the rings are freed again. */
static int cp_open (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;

	if (netif_msg_ifup(cp))
		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);

	/* Rings must exist before cp_init_hw() writes their addresses. */
	rc = cp_alloc_rings(cp);
	if (rc)
		return rc;

	cp_init_hw(cp);

	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		goto err_out_hw;

	/* Establish carrier state from the current MII link status. */
	netif_carrier_off(dev);
	mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
	netif_start_queue(dev);

	return 0;

err_out_hw:
	cp_stop_hw(cp);
	cp_free_rings(cp);
	return rc;
}
1201
/* net_device stop hook: halt the queue and hardware under the lock,
 * then (with the lock released) quiesce and free the IRQ and rings.
 * Always returns 0. */
static int cp_close (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (netif_msg_ifdown(cp))
		printk(KERN_DEBUG "%s: disabling interface\n", dev->name);

	spin_lock_irqsave(&cp->lock, flags);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	cp_stop_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	/* Wait for any in-flight handler before releasing the line. */
	synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	cp_free_rings(cp);
	return 0;
}
1225
#ifdef BROKEN
/* Change the interface MTU.  On a running interface the hardware is
 * stopped, the rings are rebuilt for the new buffer size, and the
 * hardware is restarted.  Compiled out (BROKEN) in this version.
 * NOTE(review): if cp_init_rings() fails here the interface is left
 * up without rx buffers — presumably why this is marked BROKEN;
 * confirm before re-enabling. */
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	/* check for invalid MTU, according to hardware limits */
	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
		return -EINVAL;

	/* if network interface not up, no need for complexity */
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		cp_set_rxbufsize(cp);	/* set new rx buf size */
		return 0;
	}

	spin_lock_irqsave(&cp->lock, flags);

	cp_stop_hw(cp);			/* stop h/w and free rings */
	cp_clean_rings(cp);

	dev->mtu = new_mtu;
	cp_set_rxbufsize(cp);		/* set new rx buf size */

	rc = cp_init_rings(cp);		/* realloc and restart h/w */
	cp_start_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}
#endif /* BROKEN */
1260
/* Map MII register numbers 0-7 to 8139C+ MMIO register offsets.
 * A zero entry means that MII register has no chip equivalent and
 * reads back as 0 (see mdio_read/mdio_write). */
static const char mii_2_8139_map[8] = {
	BasicModeCtrl,
	BasicModeStatus,
	0,
	0,
	NWayAdvert,
	NWayLPAR,
	NWayExpansion,
	0
};
1271
1272static int mdio_read(struct net_device *dev, int phy_id, int location)
1273{
1274 struct cp_private *cp = netdev_priv(dev);
1275
1276 return location < 8 && mii_2_8139_map[location] ?
1277 readw(cp->regs + mii_2_8139_map[location]) : 0;
1278}
1279
1280
1281static void mdio_write(struct net_device *dev, int phy_id, int location,
1282 int value)
1283{
1284 struct cp_private *cp = netdev_priv(dev);
1285
1286 if (location == 0) {
1287 cpw8(Cfg9346, Cfg9346_Unlock);
1288 cpw16(BasicModeCtrl, value);
1289 cpw8(Cfg9346, Cfg9346_Lock);
1290 } else if (location < 8 && mii_2_8139_map[location])
1291 cpw16(mii_2_8139_map[location], value);
1292}
1293
1294/* Set the ethtool Wake-on-LAN settings */
/* Translate the requested ethtool WOL option mask into the chip's
 * Config3 (link-up / magic-packet) and Config5 (unicast / broadcast /
 * multicast frame) bits, and record whether WOL is active.
 * Caller holds cp->lock.  Always returns 0. */
static int netdev_set_wol (struct cp_private *cp,
			   const struct ethtool_wolinfo *wol)
{
	u8 options;

	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
	}

	/* Config3 is only writable while the config regs are unlocked. */
	cpw8 (Cfg9346, Cfg9346_Unlock);
	cpw8 (Config3, options);
	cpw8 (Cfg9346, Cfg9346_Lock);

	options = 0; /* Paranoia setting */
	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_UCAST)  options |= UWF;
		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
	}

	cpw8 (Config5, options);

	cp->wol_enabled = (wol->wolopts) ? 1 : 0;

	return 0;
}
1326
1327/* Get the ethtool Wake-on-LAN settings */
/* Report the supported WOL modes and, when WOL is enabled, decode the
 * currently active modes from Config3/Config5 back into ethtool
 * option bits.  Caller holds cp->lock. */
static void netdev_get_wol (struct cp_private *cp,
			    struct ethtool_wolinfo *wol)
{
	u8 options;

	wol->wolopts = 0; /* Start from scratch */
	wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
			 WAKE_MCAST | WAKE_UCAST;
	/* We don't need to go on if WOL is disabled */
	if (!cp->wol_enabled) return;

	options = cpr8 (Config3);
	if (options & LinkUp)		wol->wolopts |= WAKE_PHY;
	if (options & MagicPacket)	wol->wolopts |= WAKE_MAGIC;

	options = 0; /* Paranoia setting */
	options = cpr8 (Config5);
	if (options & UWF)		wol->wolopts |= WAKE_UCAST;
	if (options & BWF)		wol->wolopts |= WAKE_BCAST;
	if (options & MWF)		wol->wolopts |= WAKE_MCAST;
}
1349
1350static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1351{
1352 struct cp_private *cp = netdev_priv(dev);
1353
1354 strcpy (info->driver, DRV_NAME);
1355 strcpy (info->version, DRV_VERSION);
1356 strcpy (info->bus_info, pci_name(cp->pdev));
1357}
1358
/* ethtool get_regs_len hook: size of the register snapshot buffer. */
static int cp_get_regs_len(struct net_device *dev)
{
	return CP_REGS_SIZE;
}
1363
/* ethtool get_stats_count hook: number of u64 statistics exported. */
static int cp_get_stats_count (struct net_device *dev)
{
	return CP_NUM_STATS;
}
1368
1369static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1370{
1371 struct cp_private *cp = netdev_priv(dev);
1372 int rc;
1373 unsigned long flags;
1374
1375 spin_lock_irqsave(&cp->lock, flags);
1376 rc = mii_ethtool_gset(&cp->mii_if, cmd);
1377 spin_unlock_irqrestore(&cp->lock, flags);
1378
1379 return rc;
1380}
1381
1382static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1383{
1384 struct cp_private *cp = netdev_priv(dev);
1385 int rc;
1386 unsigned long flags;
1387
1388 spin_lock_irqsave(&cp->lock, flags);
1389 rc = mii_ethtool_sset(&cp->mii_if, cmd);
1390 spin_unlock_irqrestore(&cp->lock, flags);
1391
1392 return rc;
1393}
1394
/* ethtool nway_reset hook: restart PHY autonegotiation. */
static int cp_nway_reset(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return mii_nway_restart(&cp->mii_if);
}
1400
/* ethtool get_msglevel hook: current netif message-enable mask. */
static u32 cp_get_msglevel(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return cp->msg_enable;
}
1406
/* ethtool set_msglevel hook: set the netif message-enable mask. */
static void cp_set_msglevel(struct net_device *dev, u32 value)
{
	struct cp_private *cp = netdev_priv(dev);
	cp->msg_enable = value;
}
1412
1413static u32 cp_get_rx_csum(struct net_device *dev)
1414{
1415 struct cp_private *cp = netdev_priv(dev);
1416 return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
1417}
1418
1419static int cp_set_rx_csum(struct net_device *dev, u32 data)
1420{
1421 struct cp_private *cp = netdev_priv(dev);
1422 u16 cmd = cp->cpcmd, newcmd;
1423
1424 newcmd = cmd;
1425
1426 if (data)
1427 newcmd |= RxChkSum;
1428 else
1429 newcmd &= ~RxChkSum;
1430
1431 if (newcmd != cmd) {
1432 unsigned long flags;
1433
1434 spin_lock_irqsave(&cp->lock, flags);
1435 cp->cpcmd = newcmd;
1436 cpw16_f(CpCmd, newcmd);
1437 spin_unlock_irqrestore(&cp->lock, flags);
1438 }
1439
1440 return 0;
1441}
1442
/* ethtool get_regs hook: snapshot the MMIO register window into the
 * caller's buffer.  Silently bails if the buffer is too small (the
 * ethtool core sizes it from cp_get_regs_len(), so this is defensive). */
static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *p)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len < CP_REGS_SIZE)
		return /* -EINVAL */;

	regs->version = CP_REGS_VER;

	spin_lock_irqsave(&cp->lock, flags);
	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
	spin_unlock_irqrestore(&cp->lock, flags);
}
1458
1459static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1460{
1461 struct cp_private *cp = netdev_priv(dev);
1462 unsigned long flags;
1463
1464 spin_lock_irqsave (&cp->lock, flags);
1465 netdev_get_wol (cp, wol);
1466 spin_unlock_irqrestore (&cp->lock, flags);
1467}
1468
1469static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1470{
1471 struct cp_private *cp = netdev_priv(dev);
1472 unsigned long flags;
1473 int rc;
1474
1475 spin_lock_irqsave (&cp->lock, flags);
1476 rc = netdev_set_wol (cp, wol);
1477 spin_unlock_irqrestore (&cp->lock, flags);
1478
1479 return rc;
1480}
1481
1482static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1483{
1484 switch (stringset) {
1485 case ETH_SS_STATS:
1486 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1487 break;
1488 default:
1489 BUG();
1490 break;
1491 }
1492}
1493
/* ethtool get_ethtool_stats hook: ask the chip to DMA its hardware
 * statistics block into a temporary coherent buffer, poll for
 * completion (up to ~10ms), then unpack the counters — plus the one
 * software counter (rx_frags) — into the caller's u64 array in the
 * same order as ethtool_stats_keys. */
static void cp_get_ethtool_stats (struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct cp_private *cp = netdev_priv(dev);
	struct cp_dma_stats *nic_stats;
	dma_addr_t dma;
	int i;

	nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
	if (!nic_stats)
		return;

	/* begin NIC statistics dump */
	cpw32(StatsAddr + 4, (u64)dma >> 32);
	cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
	cpr32(StatsAddr);	/* flush posted write */

	/* The chip clears DumpStats when the DMA is complete. */
	for (i = 0; i < 1000; i++) {
		if ((cpr32(StatsAddr) & DumpStats) == 0)
			break;
		udelay(10);
	}
	cpw32(StatsAddr, 0);
	cpw32(StatsAddr + 4, 0);
	cpr32(StatsAddr);

	i = 0;
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
	tmp_stats[i++] = cp->cp_stats.rx_frags;
	BUG_ON(i != CP_NUM_STATS);

	pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
}
1539
/* ethtool operations table for the 8139C+ driver. */
static const struct ethtool_ops cp_ethtool_ops = {
	.get_drvinfo		= cp_get_drvinfo,
	.get_regs_len		= cp_get_regs_len,
	.get_stats_count	= cp_get_stats_count,
	.get_settings		= cp_get_settings,
	.set_settings		= cp_set_settings,
	.nway_reset		= cp_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= cp_get_msglevel,
	.set_msglevel		= cp_set_msglevel,
	.get_rx_csum		= cp_get_rx_csum,
	.set_rx_csum		= cp_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum, /* local! */
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
	.get_regs		= cp_get_regs,
	.get_wol		= cp_get_wol,
	.set_wol		= cp_set_wol,
	.get_strings		= cp_get_strings,
	.get_ethtool_stats	= cp_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
	.get_eeprom_len		= cp_get_eeprom_len,
	.get_eeprom		= cp_get_eeprom,
	.set_eeprom		= cp_set_eeprom,
};
1568
1569static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1570{
1571 struct cp_private *cp = netdev_priv(dev);
1572 int rc;
1573 unsigned long flags;
1574
1575 if (!netif_running(dev))
1576 return -EINVAL;
1577
1578 spin_lock_irqsave(&cp->lock, flags);
1579 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1580 spin_unlock_irqrestore(&cp->lock, flags);
1581 return rc;
1582}
1583
/* Serial EEPROM section. */

/* EEPROM_Ctrl bits (bit-banged through the Cfg9346 register). */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS		0x08	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
#define EE_WRITE_0	0x00
#define EE_WRITE_1	0x02
#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
#define EE_ENB		(0x80 | EE_CS)

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
 */

#define eeprom_delay()	readl(ee_addr)

/* The EEPROM commands include the alway-set leading bit. */
#define EE_EXTEND_CMD	(4)	/* prefix for the extended sub-commands below */
#define EE_WRITE_CMD	(5)
#define EE_READ_CMD	(6)
#define EE_ERASE_CMD	(7)

/* Sub-command selectors encoded in the address field of EE_EXTEND_CMD. */
#define EE_EWDS_ADDR	(0)	/* disable writes */
#define EE_WRAL_ADDR	(1)	/* write all */
#define EE_ERAL_ADDR	(2)	/* erase all */
#define EE_EWEN_ADDR	(3)	/* enable writes */

/* Magic cookie used to validate ethtool EEPROM requests. */
#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1613
/* Begin an EEPROM transaction: pulse chip-select low then assert it. */
static void eeprom_cmd_start(void __iomem *ee_addr)
{
	writeb (EE_ENB & ~EE_CS, ee_addr);
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620
/* Clock out 'cmd_len' bits of 'cmd', MSB first, toggling the EEPROM
 * shift clock for each bit.  Timing follows the bit-bang protocol;
 * do not reorder the writes. */
static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
{
	int i;

	/* Shift the command bits out. */
	for (i = cmd_len - 1; i >= 0; i--) {
		int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writeb (EE_ENB | dataval, ee_addr);
		eeprom_delay ();
		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
	}
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}
1636
/* End an EEPROM transaction by deasserting chip-select. */
static void eeprom_cmd_end(void __iomem *ee_addr)
{
	writeb (~EE_CS, ee_addr);
	eeprom_delay ();
}
1642
/* Issue an EE_EXTEND_CMD sub-command (EWEN/EWDS/ERAL/WRAL): the
 * sub-command selector occupies the top two address bits. */
static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
			      int addr_len)
{
	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
	eeprom_cmd_end(ee_addr);
}
1652
/* Read one 16-bit word from the serial EEPROM at 'location'.
 * 'addr_len' is the EEPROM address width in bits (6 or 8 depending
 * on part size).  Bits are clocked in MSB first. */
static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
{
	int i;
	u16 retval = 0;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);

	for (i = 16; i > 0; i--) {
		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
		retval =
		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
				     0);
		writeb (EE_ENB, ee_addr);
		eeprom_delay ();
	}

	eeprom_cmd_end(ee_addr);

	return retval;
}
1677
/* Write one 16-bit word 'val' to the serial EEPROM at 'location':
 * enable writes (EWEN), clock out the write command and data, poll
 * the data-out line until the part signals completion (bounded by a
 * busy-wait loop), then disable writes again (EWDS). */
static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
			 int addr_len)
{
	int i;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int write_cmd = location | (EE_WRITE_CMD << addr_len);

	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
	eeprom_cmd(ee_addr, val, 16);
	eeprom_cmd_end(ee_addr);

	eeprom_cmd_start(ee_addr);
	/* EE_DATA_READ high indicates the internal write has finished. */
	for (i = 0; i < 20000; i++)
		if (readb(ee_addr) & EE_DATA_READ)
			break;
	eeprom_cmd_end(ee_addr);

	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
}
1700
1701static int cp_get_eeprom_len(struct net_device *dev)
1702{
1703 struct cp_private *cp = netdev_priv(dev);
1704 int size;
1705
1706 spin_lock_irq(&cp->lock);
1707 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1708 spin_unlock_irq(&cp->lock);
1709
1710 return size;
1711}
1712
1713static int cp_get_eeprom(struct net_device *dev,
1714 struct ethtool_eeprom *eeprom, u8 *data)
1715{
1716 struct cp_private *cp = netdev_priv(dev);
1717 unsigned int addr_len;
1718 u16 val;
1719 u32 offset = eeprom->offset >> 1;
1720 u32 len = eeprom->len;
1721 u32 i = 0;
1722
1723 eeprom->magic = CP_EEPROM_MAGIC;
1724
1725 spin_lock_irq(&cp->lock);
1726
1727 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1728
1729 if (eeprom->offset & 1) {
1730 val = read_eeprom(cp->regs, offset, addr_len);
1731 data[i++] = (u8)(val >> 8);
1732 offset++;
1733 }
1734
1735 while (i < len - 1) {
1736 val = read_eeprom(cp->regs, offset, addr_len);
1737 data[i++] = (u8)val;
1738 data[i++] = (u8)(val >> 8);
1739 offset++;
1740 }
1741
1742 if (i < len) {
1743 val = read_eeprom(cp->regs, offset, addr_len);
1744 data[i] = (u8)val;
1745 }
1746
1747 spin_unlock_irq(&cp->lock);
1748 return 0;
1749}
1750
1751static int cp_set_eeprom(struct net_device *dev,
1752 struct ethtool_eeprom *eeprom, u8 *data)
1753{
1754 struct cp_private *cp = netdev_priv(dev);
1755 unsigned int addr_len;
1756 u16 val;
1757 u32 offset = eeprom->offset >> 1;
1758 u32 len = eeprom->len;
1759 u32 i = 0;
1760
1761 if (eeprom->magic != CP_EEPROM_MAGIC)
1762 return -EINVAL;
1763
1764 spin_lock_irq(&cp->lock);
1765
1766 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1767
1768 if (eeprom->offset & 1) {
1769 val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1770 val |= (u16)data[i++] << 8;
1771 write_eeprom(cp->regs, offset, val, addr_len);
1772 offset++;
1773 }
1774
1775 while (i < len - 1) {
1776 val = (u16)data[i++];
1777 val |= (u16)data[i++] << 8;
1778 write_eeprom(cp->regs, offset, val, addr_len);
1779 offset++;
1780 }
1781
1782 if (i < len) {
1783 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1784 val |= (u16)data[i];
1785 write_eeprom(cp->regs, offset, val, addr_len);
1786 }
1787
1788 spin_unlock_irq(&cp->lock);
1789 return 0;
1790}
1791
/* Put the board into D3cold state and wait for WakeUp signal */
static void cp_set_d3_state (struct cp_private *cp)
{
	pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
	pci_set_power_state (cp->pdev, PCI_D3hot);
}
1798
/*
 * PCI probe entry point: bring up one RTL-8139C+ board.
 *
 * Allocates the net_device, initializes software state, claims and maps
 * the MMIO register window (BAR 1), configures DMA masks, reads the MAC
 * address from the EEPROM, wires up the net_device ops, and registers
 * the interface.  On any failure the goto chain unwinds exactly the
 * resources acquired so far, in reverse order of acquisition.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct cp_private *cp;
	int rc;
	void __iomem *regs;
	resource_size_t pciaddr;
	unsigned int addr_len, i, pci_using_dac;
	u8 pci_rev;

#ifndef MODULE
	/* Built-in case: print the version banner once, on first probe. */
	static int version_printed;
	if (version_printed++ == 0)
		printk("%s", version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	/* Pre-0x20 revisions are plain 8139 chips, handled by 8139too. */
	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
		dev_err(&pdev->dev,
			"This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
			pdev->vendor, pdev->device, pci_rev);
		dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct cp_private));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Initialize software state before touching the hardware. */
	cp = netdev_priv(dev);
	cp->pdev = pdev;
	cp->dev = dev;
	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
	spin_lock_init (&cp->lock);
	cp->mii_if.dev = dev;
	cp->mii_if.mdio_read = mdio_read;
	cp->mii_if.mdio_write = mdio_write;
	cp->mii_if.phy_id = CP_INTERNAL_PHY;
	cp->mii_if.phy_id_mask = 0x1f;
	cp->mii_if.reg_num_mask = 0x1f;
	cp_set_rxbufsize(cp);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	rc = pci_set_mwi(pdev);
	if (rc)
		goto err_out_disable;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_mwi;

	/* BAR 1 holds the memory-mapped register window. */
	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		dev_err(&pdev->dev, "no MMIO resource\n");
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
		rc = -EIO;
		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
			(unsigned long long)pci_resource_len(pdev, 1));
		goto err_out_res;
	}

	/* Configure DMA attributes: try 64-bit first, fall back to 32-bit. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
	} else {
		pci_using_dac = 0;

		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting.\n");
			goto err_out_res;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_err(&pdev->dev,
				"No usable consistent DMA configuration, "
				"aborting.\n");
			goto err_out_res;
		}
	}

	/* Cached C+ command register value, written to hw in cp_init_hw(). */
	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
		PCIMulRW | RxChkSum | CpRxOn | CpTxOn;

	regs = ioremap(pciaddr, CP_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
			(unsigned long long)pci_resource_len(pdev, 1),
			(unsigned long long)pciaddr);
		goto err_out_res;
	}
	dev->base_addr = (unsigned long) regs;
	cp->regs = regs;

	/* Quiesce the chip before reading the EEPROM. */
	cp_stop_hw(cp);

	/* read MAC address from EEPROM (93C56 = 8-bit addr, else 93C46) */
	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
	for (i = 0; i < 3; i++)
		((u16 *) (dev->dev_addr))[i] =
		    le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->open = cp_open;
	dev->stop = cp_close;
	dev->set_multicast_list = cp_set_rx_mode;
	dev->hard_start_xmit = cp_start_xmit;
	dev->get_stats = cp_get_stats;
	dev->do_ioctl = cp_ioctl;
	dev->poll = cp_rx_poll;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = cp_poll_controller;
#endif
	dev->weight = 16;	/* arbitrary? from NAPI_HOWTO.txt. */
#ifdef BROKEN
	dev->change_mtu = cp_change_mtu;
#endif
	dev->ethtool_ops = &cp_ethtool_ops;
#if 0
	dev->tx_timeout = cp_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#endif

#if CP_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = cp_vlan_rx_register;
#endif

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

#if 0 /* disabled by default until verified */
	dev->features |= NETIF_F_TSO;
#endif

	dev->irq = pdev->irq;

	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
		"%02x:%02x:%02x:%02x:%02x:%02x, "
		"IRQ %d\n",
		dev->name,
		dev->base_addr,
		dev->dev_addr[0], dev->dev_addr[1],
		dev->dev_addr[2], dev->dev_addr[3],
		dev->dev_addr[4], dev->dev_addr[5],
		dev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering and memory-write-invalidate */
	pci_set_master(pdev);

	/* If WOL was enabled via ethtool, park the chip in low power now. */
	if (cp->wol_enabled)
		cp_set_d3_state (cp);

	return 0;

err_out_iomap:
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_mwi:
	pci_clear_mwi(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}
1986
1987static void cp_remove_one (struct pci_dev *pdev)
1988{
1989 struct net_device *dev = pci_get_drvdata(pdev);
1990 struct cp_private *cp = netdev_priv(dev);
1991
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 unregister_netdev(dev);
1993 iounmap(cp->regs);
Jeff Garzik2e8a5382006-06-27 10:47:51 -04001994 if (cp->wol_enabled)
1995 pci_set_power_state (pdev, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 pci_release_regions(pdev);
1997 pci_clear_mwi(pdev);
1998 pci_disable_device(pdev);
1999 pci_set_drvdata(pdev, NULL);
2000 free_netdev(dev);
2001}
2002
2003#ifdef CONFIG_PM
Pavel Machek05adc3b2005-04-16 15:25:25 -07002004static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005{
François Romieu7668a492006-08-15 20:10:57 +02002006 struct net_device *dev = pci_get_drvdata(pdev);
2007 struct cp_private *cp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 unsigned long flags;
2009
François Romieu7668a492006-08-15 20:10:57 +02002010 if (!netif_running(dev))
2011 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012
2013 netif_device_detach (dev);
2014 netif_stop_queue (dev);
2015
2016 spin_lock_irqsave (&cp->lock, flags);
2017
2018 /* Disable Rx and Tx */
2019 cpw16 (IntrMask, 0);
2020 cpw8 (Cmd, cpr8 (Cmd) & (~RxOn | ~TxOn));
2021
2022 spin_unlock_irqrestore (&cp->lock, flags);
2023
Francois Romieu576cfa92006-02-27 23:15:06 +01002024 pci_save_state(pdev);
2025 pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2026 pci_set_power_state(pdev, pci_choose_state(pdev, state));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027
2028 return 0;
2029}
2030
2031static int cp_resume (struct pci_dev *pdev)
2032{
Francois Romieu576cfa92006-02-27 23:15:06 +01002033 struct net_device *dev = pci_get_drvdata (pdev);
2034 struct cp_private *cp = netdev_priv(dev);
Pierre Ossmana4cf0762005-07-04 00:22:53 +02002035 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036
Francois Romieu576cfa92006-02-27 23:15:06 +01002037 if (!netif_running(dev))
2038 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039
2040 netif_device_attach (dev);
Francois Romieu576cfa92006-02-27 23:15:06 +01002041
2042 pci_set_power_state(pdev, PCI_D0);
2043 pci_restore_state(pdev);
2044 pci_enable_wake(pdev, PCI_D0, 0);
2045
2046 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
2047 cp_init_rings_index (cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 cp_init_hw (cp);
2049 netif_start_queue (dev);
Pierre Ossmana4cf0762005-07-04 00:22:53 +02002050
2051 spin_lock_irqsave (&cp->lock, flags);
2052
2053 mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
2054
2055 spin_unlock_irqrestore (&cp->lock, flags);
Jeff Garzikf3b197a2006-05-26 21:39:03 -04002056
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057 return 0;
2058}
2059#endif /* CONFIG_PM */
2060
2061static struct pci_driver cp_driver = {
2062 .name = DRV_NAME,
2063 .id_table = cp_pci_tbl,
2064 .probe = cp_init_one,
2065 .remove = cp_remove_one,
2066#ifdef CONFIG_PM
2067 .resume = cp_resume,
2068 .suspend = cp_suspend,
2069#endif
2070};
2071
/*
 * Module init: print the version banner (modular builds only; built-in
 * builds print it on first probe instead) and register the PCI driver.
 * Returns 0 or a negative errno from pci_register_driver().
 */
static int __init cp_init (void)
{
#ifdef MODULE
	printk("%s", version);
#endif
	return pci_register_driver(&cp_driver);
}
2079
2080static void __exit cp_exit (void)
2081{
2082 pci_unregister_driver (&cp_driver);
2083}
2084
/* Hook the driver's init/exit routines into the module loader. */
module_init(cp_init);
module_exit(cp_exit);