/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
	Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>

	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
	Copyright 2001 Manfred Spraul [natsemi.c]
	Copyright 1999-2001 by Donald Becker. [natsemi.c]
	Written 1997-2001 by Donald Becker. [8139too.c]
	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	See the file COPYING in this distribution for more information.

	Contributors:

		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

	TODO:
	* Test Tx checksumming thoroughly
	* Implement dev->tx_timeout

	Low priority TODO:
	* Complete reset on PciErr
	* Consider Rx interrupt mitigation using TimerIntr
	* Investigate using skb->priority with h/w VLAN priority
	* Investigate using High Priority Tx Queue with skb->priority
	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
	* Implement Tx software interrupt mitigation via
	  Tx descriptor bit
	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
	  for this to be supported, one must(?) turn on packet padding.
	* Support external MII transceivers (patch available)

	NOTES:
	* TX checksumming is considered experimental.  It is off by
	  default; use ethtool to turn it on.

 */
49
50#define DRV_NAME "8139cp"
51#define DRV_VERSION "1.2"
52#define DRV_RELDATE "Mar 22, 2004"
53
54
55#include <linux/config.h>
56#include <linux/module.h>
57#include <linux/kernel.h>
58#include <linux/compiler.h>
59#include <linux/netdevice.h>
60#include <linux/etherdevice.h>
61#include <linux/init.h>
62#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
65#include <linux/ethtool.h>
66#include <linux/mii.h>
67#include <linux/if_vlan.h>
68#include <linux/crc32.h>
69#include <linux/in.h>
70#include <linux/ip.h>
71#include <linux/tcp.h>
72#include <linux/udp.h>
73#include <linux/cache.h>
74#include <asm/io.h>
75#include <asm/irq.h>
76#include <asm/uaccess.h>
77
78/* VLAN tagging feature enable/disable */
79#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
80#define CP_VLAN_TAG_USED 1
81#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
82 do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
83#else
84#define CP_VLAN_TAG_USED 0
85#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
86 do { (tx_desc)->opts2 = 0; } while (0)
87#endif
88
89/* These identify the driver base version and may not be removed. */
90static char version[] =
91KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
92
93MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
94MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
95MODULE_LICENSE("GPL");
96
97static int debug = -1;
98MODULE_PARM (debug, "i");
99MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
100
101/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
102 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
103static int multicast_filter_limit = 32;
104MODULE_PARM (multicast_filter_limit, "i");
105MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
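/* Both of the above are ordinary module parameters; an illustrative load
 * (not from the original sources) might be
 *   modprobe 8139cp debug=5 multicast_filter_limit=16
 * debug is a NETIF_MSG_* bitmask; the default of -1 selects CP_DEF_MSG_ENABLE.
 */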
106
107#define PFX DRV_NAME ": "
108
109#ifndef TRUE
110#define FALSE 0
111#define TRUE (!FALSE)
112#endif
113
114#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
115 NETIF_MSG_PROBE | \
116 NETIF_MSG_LINK)
117#define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */
118#define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */
119#define CP_REGS_SIZE (0xff + 1)
120#define CP_REGS_VER 1 /* version 1 */
121#define CP_RX_RING_SIZE 64
122#define CP_TX_RING_SIZE 64
123#define CP_RING_BYTES \
124 ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
125 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
126 CP_STATS_SIZE)
127#define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
128#define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
129#define TX_BUFFS_AVAIL(CP) \
130 (((CP)->tx_tail <= (CP)->tx_head) ? \
131 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
132 (CP)->tx_tail - (CP)->tx_head - 1)
133
134#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
135#define RX_OFFSET 2
136#define CP_INTERNAL_PHY 32
137
138/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
139#define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */
140#define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */
141#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
142#define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */
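/* Worked example of the log_2(bytes)-4 encoding above: a register value v
 * selects 16 << v bytes, so RX_FIFO_THRESH = 5 buffers 512 bytes before the
 * first PCI transfer, RX_DMA_BURST = 4 allows 256-byte bursts, and
 * TX_DMA_BURST = 6 allows 1024-byte bursts.
 */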
143
144/* Time in jiffies before concluding the transmitter is hung. */
145#define TX_TIMEOUT (6*HZ)
146
147/* hardware minimum and maximum for a single frame's data payload */
148#define CP_MIN_MTU 60 /* TODO: allow lower, but pad */
149#define CP_MAX_MTU 4096
150
151enum {
152 /* NIC register offsets */
153 MAC0 = 0x00, /* Ethernet hardware address. */
154 MAR0 = 0x08, /* Multicast filter. */
155 StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
156 TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */
157 HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */
158 Cmd = 0x37, /* Command register */
159 IntrMask = 0x3C, /* Interrupt mask */
160 IntrStatus = 0x3E, /* Interrupt status */
161 TxConfig = 0x40, /* Tx configuration */
162 ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
163 RxConfig = 0x44, /* Rx configuration */
164 RxMissed = 0x4C, /* 24 bits valid, write clears */
165 Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
166 Config1 = 0x52, /* Config1 */
167 Config3 = 0x59, /* Config3 */
168 Config4 = 0x5A, /* Config4 */
169 MultiIntr = 0x5C, /* Multiple interrupt select */
170 BasicModeCtrl = 0x62, /* MII BMCR */
171 BasicModeStatus = 0x64, /* MII BMSR */
172 NWayAdvert = 0x66, /* MII ADVERTISE */
173 NWayLPAR = 0x68, /* MII LPA */
174 NWayExpansion = 0x6A, /* MII Expansion */
175 Config5 = 0xD8, /* Config5 */
176 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
177 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
178 CpCmd = 0xE0, /* C+ Command register (C+ mode only) */
179 IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */
180 RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */
181 TxThresh = 0xEC, /* Early Tx threshold */
182 OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */
183 OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */
184
185 /* Tx and Rx status descriptors */
186 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
187 RingEnd = (1 << 30), /* End of descriptor ring */
188 FirstFrag = (1 << 29), /* First segment of a packet */
189 LastFrag = (1 << 28), /* Final segment of a packet */
190 TxError = (1 << 23), /* Tx error summary */
191 RxError = (1 << 20), /* Rx error summary */
192 IPCS = (1 << 18), /* Calculate IP checksum */
193 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
194 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
195 TxVlanTag = (1 << 17), /* Add VLAN tag */
196 RxVlanTagged = (1 << 16), /* Rx VLAN tag available */
197 IPFail = (1 << 15), /* IP checksum failed */
198 UDPFail = (1 << 14), /* UDP/IP checksum failed */
199 TCPFail = (1 << 13), /* TCP/IP checksum failed */
200 NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */
201 PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */
202 PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
203 RxProtoTCP = 1,
204 RxProtoUDP = 2,
205 RxProtoIP = 3,
206 TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */
207 TxOWC = (1 << 22), /* Tx Out-of-window collision */
208 TxLinkFail = (1 << 21), /* Link failed during Tx of packet */
209 TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */
210 TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */
211 TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
212 RxErrFrame = (1 << 27), /* Rx frame alignment error */
213 RxMcast = (1 << 26), /* Rx multicast packet rcv'd */
214 RxErrCRC = (1 << 18), /* Rx CRC error */
215 RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */
216 RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */
217 RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
218
219 /* StatsAddr register */
220 DumpStats = (1 << 3), /* Begin stats dump */
221
222 /* RxConfig register */
223 RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */
224 RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */
225 AcceptErr = 0x20, /* Accept packets with CRC errors */
226 AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */
227 AcceptBroadcast = 0x08, /* Accept broadcast packets */
228 AcceptMulticast = 0x04, /* Accept multicast packets */
229 AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */
230 AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */
231
232 /* IntrMask / IntrStatus registers */
233 PciErr = (1 << 15), /* System error on the PCI bus */
234 TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
235 LenChg = (1 << 13), /* Cable length change */
236 SWInt = (1 << 8), /* Software-requested interrupt */
237 TxEmpty = (1 << 7), /* No Tx descriptors available */
238 RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */
239 LinkChg = (1 << 5), /* Packet underrun, or link change */
240 RxEmpty = (1 << 4), /* No Rx descriptors available */
241 TxErr = (1 << 3), /* Tx error */
242 TxOK = (1 << 2), /* Tx packet sent */
243 RxErr = (1 << 1), /* Rx error */
244 RxOK = (1 << 0), /* Rx packet received */
245 IntrResvd = (1 << 10), /* reserved, according to RealTek engineers,
246 but hardware likes to raise it */
247
248 IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
249 RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
250 RxErr | RxOK | IntrResvd,
251
252 /* C mode command register */
253 CmdReset = (1 << 4), /* Enable to reset; self-clearing */
254 RxOn = (1 << 3), /* Rx mode enable */
255 TxOn = (1 << 2), /* Tx mode enable */
256
257 /* C+ mode command register */
258 RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */
259 RxChkSum = (1 << 5), /* Rx checksum offload enable */
260 PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */
261 PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */
262 CpRxOn = (1 << 1), /* Rx mode enable */
263 CpTxOn = (1 << 0), /* Tx mode enable */
264
	/* Cfg9346 EEPROM control register */
266 Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */
267 Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */
268
269 /* TxConfig register */
270 IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
271 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
272
273 /* Early Tx Threshold register */
274 TxThreshMask = 0x3f, /* Mask bits 5-0 */
275 TxThreshMax = 2048, /* Max early Tx threshold */
276
277 /* Config1 register */
278 DriverLoaded = (1 << 5), /* Software marker, driver is loaded */
279 LWACT = (1 << 4), /* LWAKE active mode */
280 PMEnable = (1 << 0), /* Enable various PM features of chip */
281
282 /* Config3 register */
283 PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */
284 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
285 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
286
287 /* Config4 register */
288 LWPTN = (1 << 1), /* LWAKE Pattern */
289 LWPME = (1 << 4), /* LANWAKE vs PMEB */
290
291 /* Config5 register */
292 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
293 MWF = (1 << 5), /* Accept Multicast wakeup frame */
294 UWF = (1 << 4), /* Accept Unicast wakeup frame */
295 LANWake = (1 << 1), /* Enable LANWake signal */
296 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
297
298 cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
299 cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
300 cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
301};
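/* Note on the interrupt masks above: cp_interrupt() drops from cp_intr_mask
 * to cp_norx_intr_mask while the NAPI poll routine owns the Rx ring, and
 * cp_rx_poll() restores cp_intr_mask once the ring has been drained.
 */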
302
303static const unsigned int cp_rx_config =
304 (RX_FIFO_THRESH << RxCfgFIFOShift) |
305 (RX_DMA_BURST << RxCfgDMAShift);
306
307struct cp_desc {
308 u32 opts1;
309 u32 opts2;
310 u64 addr;
311};
312
313struct ring_info {
314 struct sk_buff *skb;
315 dma_addr_t mapping;
316 unsigned frag;
317};
318
319struct cp_dma_stats {
320 u64 tx_ok;
321 u64 rx_ok;
322 u64 tx_err;
323 u32 rx_err;
324 u16 rx_fifo;
325 u16 frame_align;
326 u32 tx_ok_1col;
327 u32 tx_ok_mcol;
328 u64 rx_ok_phys;
329 u64 rx_ok_bcast;
330 u32 rx_ok_mcast;
331 u16 tx_abort;
332 u16 tx_underrun;
333} __attribute__((packed));
334
335struct cp_extra_stats {
336 unsigned long rx_frags;
337};
338
339struct cp_private {
340 void __iomem *regs;
341 struct net_device *dev;
342 spinlock_t lock;
343 u32 msg_enable;
344
345 struct pci_dev *pdev;
346 u32 rx_config;
347 u16 cpcmd;
348
349 struct net_device_stats net_stats;
350 struct cp_extra_stats cp_stats;
351 struct cp_dma_stats *nic_stats;
352 dma_addr_t nic_stats_dma;
353
354 unsigned rx_tail ____cacheline_aligned;
355 struct cp_desc *rx_ring;
356 struct ring_info rx_skb[CP_RX_RING_SIZE];
357 unsigned rx_buf_sz;
358
359 unsigned tx_head ____cacheline_aligned;
360 unsigned tx_tail;
361
362 struct cp_desc *tx_ring;
363 struct ring_info tx_skb[CP_TX_RING_SIZE];
364 dma_addr_t ring_dma;
365
366#if CP_VLAN_TAG_USED
367 struct vlan_group *vlgrp;
368#endif
369
370 unsigned int wol_enabled : 1; /* Is Wake-on-LAN enabled? */
371
372 struct mii_if_info mii_if;
373};
374
375#define cpr8(reg) readb(cp->regs + (reg))
376#define cpr16(reg) readw(cp->regs + (reg))
377#define cpr32(reg) readl(cp->regs + (reg))
378#define cpw8(reg,val) writeb((val), cp->regs + (reg))
379#define cpw16(reg,val) writew((val), cp->regs + (reg))
380#define cpw32(reg,val) writel((val), cp->regs + (reg))
381#define cpw8_f(reg,val) do { \
382 writeb((val), cp->regs + (reg)); \
383 readb(cp->regs + (reg)); \
384 } while (0)
385#define cpw16_f(reg,val) do { \
386 writew((val), cp->regs + (reg)); \
387 readw(cp->regs + (reg)); \
388 } while (0)
389#define cpw32_f(reg,val) do { \
390 writel((val), cp->regs + (reg)); \
391 readl(cp->regs + (reg)); \
392 } while (0)
393
394
395static void __cp_set_rx_mode (struct net_device *dev);
396static void cp_tx (struct cp_private *cp);
397static void cp_clean_rings (struct cp_private *cp);
398
399static struct pci_device_id cp_pci_tbl[] = {
400 { PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
401 PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
402 { PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322,
403 PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
404 { },
405};
406MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
407
408static struct {
409 const char str[ETH_GSTRING_LEN];
410} ethtool_stats_keys[] = {
411 { "tx_ok" },
412 { "rx_ok" },
413 { "tx_err" },
414 { "rx_err" },
415 { "rx_fifo" },
416 { "frame_align" },
417 { "tx_ok_1col" },
418 { "tx_ok_mcol" },
419 { "rx_ok_phys" },
420 { "rx_ok_bcast" },
421 { "rx_ok_mcast" },
422 { "tx_abort" },
423 { "tx_underrun" },
424 { "rx_frags" },
425};
426
427
428#if CP_VLAN_TAG_USED
429static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
430{
431 struct cp_private *cp = netdev_priv(dev);
432 unsigned long flags;
433
434 spin_lock_irqsave(&cp->lock, flags);
435 cp->vlgrp = grp;
436 cp->cpcmd |= RxVlanOn;
437 cpw16(CpCmd, cp->cpcmd);
438 spin_unlock_irqrestore(&cp->lock, flags);
439}
440
441static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
442{
443 struct cp_private *cp = netdev_priv(dev);
444 unsigned long flags;
445
446 spin_lock_irqsave(&cp->lock, flags);
447 cp->cpcmd &= ~RxVlanOn;
448 cpw16(CpCmd, cp->cpcmd);
449 if (cp->vlgrp)
450 cp->vlgrp->vlan_devices[vid] = NULL;
451 spin_unlock_irqrestore(&cp->lock, flags);
452}
453#endif /* CP_VLAN_TAG_USED */
454
455static inline void cp_set_rxbufsize (struct cp_private *cp)
456{
457 unsigned int mtu = cp->dev->mtu;
458
459 if (mtu > ETH_DATA_LEN)
460 /* MTU + ethernet header + FCS + optional VLAN tag */
461 cp->rx_buf_sz = mtu + ETH_HLEN + 8;
462 else
463 cp->rx_buf_sz = PKT_BUF_SZ;
464}
465
466static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
467 struct cp_desc *desc)
468{
469 skb->protocol = eth_type_trans (skb, cp->dev);
470
471 cp->net_stats.rx_packets++;
472 cp->net_stats.rx_bytes += skb->len;
473 cp->dev->last_rx = jiffies;
474
475#if CP_VLAN_TAG_USED
476 if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
477 vlan_hwaccel_receive_skb(skb, cp->vlgrp,
478 be16_to_cpu(desc->opts2 & 0xffff));
479 } else
480#endif
481 netif_receive_skb(skb);
482}
483
484static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
485 u32 status, u32 len)
486{
487 if (netif_msg_rx_err (cp))
488 printk (KERN_DEBUG
489 "%s: rx err, slot %d status 0x%x len %d\n",
490 cp->dev->name, rx_tail, status, len);
491 cp->net_stats.rx_errors++;
492 if (status & RxErrFrame)
493 cp->net_stats.rx_frame_errors++;
494 if (status & RxErrCRC)
495 cp->net_stats.rx_crc_errors++;
496 if ((status & RxErrRunt) || (status & RxErrLong))
497 cp->net_stats.rx_length_errors++;
498 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
499 cp->net_stats.rx_length_errors++;
500 if (status & RxErrFIFO)
501 cp->net_stats.rx_fifo_errors++;
502}
503
504static inline unsigned int cp_rx_csum_ok (u32 status)
505{
506 unsigned int protocol = (status >> 16) & 0x3;
507
508 if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
509 return 1;
510 else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
511 return 1;
512 else if ((protocol == RxProtoIP) && (!(status & IPFail)))
513 return 1;
514 return 0;
515}
516
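/* NAPI poll callback (old dev->poll/quota interface): harvest up to
 * dev->quota received frames while the Rx interrupt sources are masked.
 * Returns 0 and re-enables the full interrupt mask once the ring is
 * drained, or 1 to request another polling pass.
 */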
517static int cp_rx_poll (struct net_device *dev, int *budget)
518{
519 struct cp_private *cp = netdev_priv(dev);
520 unsigned rx_tail = cp->rx_tail;
521 unsigned rx_work = dev->quota;
522 unsigned rx;
523
524rx_status_loop:
525 rx = 0;
526 cpw16(IntrStatus, cp_rx_intr_mask);
527
528 while (1) {
529 u32 status, len;
530 dma_addr_t mapping;
531 struct sk_buff *skb, *new_skb;
532 struct cp_desc *desc;
533 unsigned buflen;
534
535 skb = cp->rx_skb[rx_tail].skb;
536 if (!skb)
537 BUG();
538
539 desc = &cp->rx_ring[rx_tail];
540 status = le32_to_cpu(desc->opts1);
541 if (status & DescOwn)
542 break;
543
544 len = (status & 0x1fff) - 4;
545 mapping = cp->rx_skb[rx_tail].mapping;
546
547 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
548 /* we don't support incoming fragmented frames.
549 * instead, we attempt to ensure that the
550 * pre-allocated RX skbs are properly sized such
551 * that RX fragments are never encountered
552 */
553 cp_rx_err_acct(cp, rx_tail, status, len);
554 cp->net_stats.rx_dropped++;
555 cp->cp_stats.rx_frags++;
556 goto rx_next;
557 }
558
559 if (status & (RxError | RxErrFIFO)) {
560 cp_rx_err_acct(cp, rx_tail, status, len);
561 goto rx_next;
562 }
563
564 if (netif_msg_rx_status(cp))
565 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
566 cp->dev->name, rx_tail, status, len);
567
568 buflen = cp->rx_buf_sz + RX_OFFSET;
569 new_skb = dev_alloc_skb (buflen);
570 if (!new_skb) {
571 cp->net_stats.rx_dropped++;
572 goto rx_next;
573 }
574
575 skb_reserve(new_skb, RX_OFFSET);
576 new_skb->dev = cp->dev;
577
578 pci_unmap_single(cp->pdev, mapping,
579 buflen, PCI_DMA_FROMDEVICE);
580
581 /* Handle checksum offloading for incoming packets. */
582 if (cp_rx_csum_ok(status))
583 skb->ip_summed = CHECKSUM_UNNECESSARY;
584 else
585 skb->ip_summed = CHECKSUM_NONE;
586
587 skb_put(skb, len);
588
589 mapping =
590 cp->rx_skb[rx_tail].mapping =
591 pci_map_single(cp->pdev, new_skb->tail,
592 buflen, PCI_DMA_FROMDEVICE);
593 cp->rx_skb[rx_tail].skb = new_skb;
594
595 cp_rx_skb(cp, skb, desc);
596 rx++;
597
598rx_next:
599 cp->rx_ring[rx_tail].opts2 = 0;
600 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
601 if (rx_tail == (CP_RX_RING_SIZE - 1))
602 desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
603 cp->rx_buf_sz);
604 else
605 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
606 rx_tail = NEXT_RX(rx_tail);
607
608 if (!rx_work--)
609 break;
610 }
611
612 cp->rx_tail = rx_tail;
613
614 dev->quota -= rx;
615 *budget -= rx;
616
617 /* if we did not reach work limit, then we're done with
618 * this round of polling
619 */
620 if (rx_work) {
621 if (cpr16(IntrStatus) & cp_rx_intr_mask)
622 goto rx_status_loop;
623
624 local_irq_disable();
625 cpw16_f(IntrMask, cp_intr_mask);
626 __netif_rx_complete(dev);
627 local_irq_enable();
628
629 return 0; /* done */
630 }
631
632 return 1; /* not done */
633}
634
635static irqreturn_t
636cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
637{
638 struct net_device *dev = dev_instance;
639 struct cp_private *cp;
640 u16 status;
641
642 if (unlikely(dev == NULL))
643 return IRQ_NONE;
644 cp = netdev_priv(dev);
645
646 status = cpr16(IntrStatus);
647 if (!status || (status == 0xFFFF))
648 return IRQ_NONE;
649
650 if (netif_msg_intr(cp))
651 printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
652 dev->name, status, cpr8(Cmd), cpr16(CpCmd));
653
654 cpw16(IntrStatus, status & ~cp_rx_intr_mask);
655
656 spin_lock(&cp->lock);
657
	/* close possible races with dev_close */
659 if (unlikely(!netif_running(dev))) {
660 cpw16(IntrMask, 0);
661 spin_unlock(&cp->lock);
662 return IRQ_HANDLED;
663 }
664
665 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
666 if (netif_rx_schedule_prep(dev)) {
667 cpw16_f(IntrMask, cp_norx_intr_mask);
668 __netif_rx_schedule(dev);
669 }
670
671 if (status & (TxOK | TxErr | TxEmpty | SWInt))
672 cp_tx(cp);
673 if (status & LinkChg)
674 mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
675
676 spin_unlock(&cp->lock);
677
678 if (status & PciErr) {
679 u16 pci_status;
680
681 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
682 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
683 printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
684 dev->name, status, pci_status);
685
686 /* TODO: reset hardware */
687 }
688
689 return IRQ_HANDLED;
690}
691
692static void cp_tx (struct cp_private *cp)
693{
694 unsigned tx_head = cp->tx_head;
695 unsigned tx_tail = cp->tx_tail;
696
697 while (tx_tail != tx_head) {
698 struct sk_buff *skb;
699 u32 status;
700
701 rmb();
702 status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
703 if (status & DescOwn)
704 break;
705
706 skb = cp->tx_skb[tx_tail].skb;
707 if (!skb)
708 BUG();
709
710 pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
711 skb->len, PCI_DMA_TODEVICE);
712
713 if (status & LastFrag) {
714 if (status & (TxError | TxFIFOUnder)) {
715 if (netif_msg_tx_err(cp))
716 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
717 cp->dev->name, status);
718 cp->net_stats.tx_errors++;
719 if (status & TxOWC)
720 cp->net_stats.tx_window_errors++;
721 if (status & TxMaxCol)
722 cp->net_stats.tx_aborted_errors++;
723 if (status & TxLinkFail)
724 cp->net_stats.tx_carrier_errors++;
725 if (status & TxFIFOUnder)
726 cp->net_stats.tx_fifo_errors++;
727 } else {
728 cp->net_stats.collisions +=
729 ((status >> TxColCntShift) & TxColCntMask);
730 cp->net_stats.tx_packets++;
731 cp->net_stats.tx_bytes += skb->len;
732 if (netif_msg_tx_done(cp))
733 printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
734 }
735 dev_kfree_skb_irq(skb);
736 }
737
738 cp->tx_skb[tx_tail].skb = NULL;
739
740 tx_tail = NEXT_TX(tx_tail);
741 }
742
743 cp->tx_tail = tx_tail;
744
745 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
746 netif_wake_queue(cp->dev);
747}
748
749static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
750{
751 struct cp_private *cp = netdev_priv(dev);
752 unsigned entry;
753 u32 eor;
754#if CP_VLAN_TAG_USED
755 u32 vlan_tag = 0;
756#endif
757
758 spin_lock_irq(&cp->lock);
759
760 /* This is a hard error, log it. */
761 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
762 netif_stop_queue(dev);
763 spin_unlock_irq(&cp->lock);
764 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
765 dev->name);
766 return 1;
767 }
768
769#if CP_VLAN_TAG_USED
770 if (cp->vlgrp && vlan_tx_tag_present(skb))
771 vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
772#endif
773
774 entry = cp->tx_head;
775 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
776 if (skb_shinfo(skb)->nr_frags == 0) {
777 struct cp_desc *txd = &cp->tx_ring[entry];
778 u32 len;
779 dma_addr_t mapping;
780
781 len = skb->len;
782 mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
783 CP_VLAN_TX_TAG(txd, vlan_tag);
784 txd->addr = cpu_to_le64(mapping);
785 wmb();
786
787 if (skb->ip_summed == CHECKSUM_HW) {
788 const struct iphdr *ip = skb->nh.iph;
789 if (ip->protocol == IPPROTO_TCP)
790 txd->opts1 = cpu_to_le32(eor | len | DescOwn |
791 FirstFrag | LastFrag |
792 IPCS | TCPCS);
793 else if (ip->protocol == IPPROTO_UDP)
794 txd->opts1 = cpu_to_le32(eor | len | DescOwn |
795 FirstFrag | LastFrag |
796 IPCS | UDPCS);
797 else
798 BUG();
799 } else
800 txd->opts1 = cpu_to_le32(eor | len | DescOwn |
801 FirstFrag | LastFrag);
802 wmb();
803
804 cp->tx_skb[entry].skb = skb;
805 cp->tx_skb[entry].mapping = mapping;
806 cp->tx_skb[entry].frag = 0;
807 entry = NEXT_TX(entry);
808 } else {
809 struct cp_desc *txd;
810 u32 first_len, first_eor;
811 dma_addr_t first_mapping;
812 int frag, first_entry = entry;
813 const struct iphdr *ip = skb->nh.iph;
814
815 /* We must give this initial chunk to the device last.
816 * Otherwise we could race with the device.
817 */
818 first_eor = eor;
819 first_len = skb_headlen(skb);
820 first_mapping = pci_map_single(cp->pdev, skb->data,
821 first_len, PCI_DMA_TODEVICE);
822 cp->tx_skb[entry].skb = skb;
823 cp->tx_skb[entry].mapping = first_mapping;
824 cp->tx_skb[entry].frag = 1;
825 entry = NEXT_TX(entry);
826
827 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
828 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
829 u32 len;
830 u32 ctrl;
831 dma_addr_t mapping;
832
833 len = this_frag->size;
834 mapping = pci_map_single(cp->pdev,
835 ((void *) page_address(this_frag->page) +
836 this_frag->page_offset),
837 len, PCI_DMA_TODEVICE);
838 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
839
840 if (skb->ip_summed == CHECKSUM_HW) {
841 ctrl = eor | len | DescOwn | IPCS;
842 if (ip->protocol == IPPROTO_TCP)
843 ctrl |= TCPCS;
844 else if (ip->protocol == IPPROTO_UDP)
845 ctrl |= UDPCS;
846 else
847 BUG();
848 } else
849 ctrl = eor | len | DescOwn;
850
851 if (frag == skb_shinfo(skb)->nr_frags - 1)
852 ctrl |= LastFrag;
853
854 txd = &cp->tx_ring[entry];
855 CP_VLAN_TX_TAG(txd, vlan_tag);
856 txd->addr = cpu_to_le64(mapping);
857 wmb();
858
859 txd->opts1 = cpu_to_le32(ctrl);
860 wmb();
861
862 cp->tx_skb[entry].skb = skb;
863 cp->tx_skb[entry].mapping = mapping;
864 cp->tx_skb[entry].frag = frag + 2;
865 entry = NEXT_TX(entry);
866 }
867
868 txd = &cp->tx_ring[first_entry];
869 CP_VLAN_TX_TAG(txd, vlan_tag);
870 txd->addr = cpu_to_le64(first_mapping);
871 wmb();
872
873 if (skb->ip_summed == CHECKSUM_HW) {
874 if (ip->protocol == IPPROTO_TCP)
875 txd->opts1 = cpu_to_le32(first_eor | first_len |
876 FirstFrag | DescOwn |
877 IPCS | TCPCS);
878 else if (ip->protocol == IPPROTO_UDP)
879 txd->opts1 = cpu_to_le32(first_eor | first_len |
880 FirstFrag | DescOwn |
881 IPCS | UDPCS);
882 else
883 BUG();
884 } else
885 txd->opts1 = cpu_to_le32(first_eor | first_len |
886 FirstFrag | DescOwn);
887 wmb();
888 }
889 cp->tx_head = entry;
890 if (netif_msg_tx_queued(cp))
891 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
892 dev->name, entry, skb->len);
893 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
894 netif_stop_queue(dev);
895
896 spin_unlock_irq(&cp->lock);
897
898 cpw8(TxPoll, NormalTxPoll);
899 dev->trans_start = jiffies;
900
901 return 0;
902}
903
904/* Set or clear the multicast filter for this adaptor.
905 This routine is not state sensitive and need not be SMP locked. */
906
907static void __cp_set_rx_mode (struct net_device *dev)
908{
909 struct cp_private *cp = netdev_priv(dev);
910 u32 mc_filter[2]; /* Multicast hash filter */
911 int i, rx_mode;
912 u32 tmp;
913
914 /* Note: do not reorder, GCC is clever about common statements. */
915 if (dev->flags & IFF_PROMISC) {
916 /* Unconditionally log net taps. */
917 printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
918 dev->name);
919 rx_mode =
920 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
921 AcceptAllPhys;
922 mc_filter[1] = mc_filter[0] = 0xffffffff;
923 } else if ((dev->mc_count > multicast_filter_limit)
924 || (dev->flags & IFF_ALLMULTI)) {
925 /* Too many to filter perfectly -- accept all multicasts. */
926 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
927 mc_filter[1] = mc_filter[0] = 0xffffffff;
928 } else {
929 struct dev_mc_list *mclist;
930 rx_mode = AcceptBroadcast | AcceptMyPhys;
931 mc_filter[1] = mc_filter[0] = 0;
932 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
933 i++, mclist = mclist->next) {
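			/* The upper 6 bits of the Ethernet CRC select one of
			 * 64 hash bits in the MAR multicast filter, written
			 * below as two 32-bit halves at MAR0 and MAR0 + 4.
			 */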
934 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
935
936 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
937 rx_mode |= AcceptMulticast;
938 }
939 }
940
941 /* We can safely update without stopping the chip. */
942 tmp = cp_rx_config | rx_mode;
943 if (cp->rx_config != tmp) {
944 cpw32_f (RxConfig, tmp);
945 cp->rx_config = tmp;
946 }
947 cpw32_f (MAR0 + 0, mc_filter[0]);
948 cpw32_f (MAR0 + 4, mc_filter[1]);
949}
950
951static void cp_set_rx_mode (struct net_device *dev)
952{
953 unsigned long flags;
954 struct cp_private *cp = netdev_priv(dev);
955
956 spin_lock_irqsave (&cp->lock, flags);
957 __cp_set_rx_mode(dev);
958 spin_unlock_irqrestore (&cp->lock, flags);
959}
960
961static void __cp_get_stats(struct cp_private *cp)
962{
963 /* only lower 24 bits valid; write any value to clear */
964 cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
965 cpw32 (RxMissed, 0);
966}
967
968static struct net_device_stats *cp_get_stats(struct net_device *dev)
969{
970 struct cp_private *cp = netdev_priv(dev);
971 unsigned long flags;
972
	/* The chip only needs to report frames it silently dropped. */
974 spin_lock_irqsave(&cp->lock, flags);
975 if (netif_running(dev) && netif_device_present(dev))
976 __cp_get_stats(cp);
977 spin_unlock_irqrestore(&cp->lock, flags);
978
979 return &cp->net_stats;
980}
981
982static void cp_stop_hw (struct cp_private *cp)
983{
984 cpw16(IntrStatus, ~(cpr16(IntrStatus)));
985 cpw16_f(IntrMask, 0);
986 cpw8(Cmd, 0);
987 cpw16_f(CpCmd, 0);
988 cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
989
990 cp->rx_tail = 0;
991 cp->tx_head = cp->tx_tail = 0;
992}
993
994static void cp_reset_hw (struct cp_private *cp)
995{
996 unsigned work = 1000;
997
998 cpw8(Cmd, CmdReset);
999
1000 while (work--) {
1001 if (!(cpr8(Cmd) & CmdReset))
1002 return;
1003
1004 set_current_state(TASK_UNINTERRUPTIBLE);
1005 schedule_timeout(10);
1006 }
1007
1008 printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
1009}
1010
1011static inline void cp_start_hw (struct cp_private *cp)
1012{
1013 cpw16(CpCmd, cp->cpcmd);
1014 cpw8(Cmd, RxOn | TxOn);
1015}
1016
1017static void cp_init_hw (struct cp_private *cp)
1018{
1019 struct net_device *dev = cp->dev;
1020 dma_addr_t ring_dma;
1021
1022 cp_reset_hw(cp);
1023
1024 cpw8_f (Cfg9346, Cfg9346_Unlock);
1025
1026 /* Restore our idea of the MAC address. */
1027 cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
1028 cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
1029
1030 cp_start_hw(cp);
1031 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
1032
1033 __cp_set_rx_mode(dev);
1034 cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1035
1036 cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
1037 /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
1038 cpw8(Config3, PARMEnable);
1039 cp->wol_enabled = 0;
1040
1041 cpw8(Config5, cpr8(Config5) & PMEStatus);
1042
1043 cpw32_f(HiTxRingAddr, 0);
1044 cpw32_f(HiTxRingAddr + 4, 0);
1045
1046 ring_dma = cp->ring_dma;
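	/* Write the 64-bit ring addresses as two 32-bit halves.  The double
	 * ">> 16 >> 16" is used instead of ">> 32" so the shift stays
	 * well-defined when dma_addr_t is only 32 bits wide.
	 */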
1047 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1048 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1049
1050 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1051 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1052 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1053
1054 cpw16(MultiIntr, 0);
1055
1056 cpw16_f(IntrMask, cp_intr_mask);
1057
1058 cpw8_f(Cfg9346, Cfg9346_Lock);
1059}
1060
1061static int cp_refill_rx (struct cp_private *cp)
1062{
1063 unsigned i;
1064
1065 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1066 struct sk_buff *skb;
1067
1068 skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
1069 if (!skb)
1070 goto err_out;
1071
1072 skb->dev = cp->dev;
1073 skb_reserve(skb, RX_OFFSET);
1074
1075 cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
1076 skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1077 cp->rx_skb[i].skb = skb;
1078 cp->rx_skb[i].frag = 0;
1079
1080 cp->rx_ring[i].opts2 = 0;
1081 cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
1082 if (i == (CP_RX_RING_SIZE - 1))
1083 cp->rx_ring[i].opts1 =
1084 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1085 else
1086 cp->rx_ring[i].opts1 =
1087 cpu_to_le32(DescOwn | cp->rx_buf_sz);
1088 }
1089
1090 return 0;
1091
1092err_out:
1093 cp_clean_rings(cp);
1094 return -ENOMEM;
1095}
1096
1097static int cp_init_rings (struct cp_private *cp)
1098{
1099 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1100 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1101
1102 cp->rx_tail = 0;
1103 cp->tx_head = cp->tx_tail = 0;
1104
1105 return cp_refill_rx (cp);
1106}
1107
1108static int cp_alloc_rings (struct cp_private *cp)
1109{
1110 void *mem;
1111
1112 mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
1113 if (!mem)
1114 return -ENOMEM;
1115
1116 cp->rx_ring = mem;
1117 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1118
1119 mem += (CP_RING_BYTES - CP_STATS_SIZE);
1120 cp->nic_stats = mem;
1121 cp->nic_stats_dma = cp->ring_dma + (CP_RING_BYTES - CP_STATS_SIZE);
1122
1123 return cp_init_rings(cp);
1124}
1125
1126static void cp_clean_rings (struct cp_private *cp)
1127{
1128 unsigned i;
1129
1130 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1131 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1132
1133 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1134 if (cp->rx_skb[i].skb) {
1135 pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
1136 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1137 dev_kfree_skb(cp->rx_skb[i].skb);
1138 }
1139 }
1140
1141 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1142 if (cp->tx_skb[i].skb) {
1143 struct sk_buff *skb = cp->tx_skb[i].skb;
1144 pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
1145 skb->len, PCI_DMA_TODEVICE);
1146 dev_kfree_skb(skb);
1147 cp->net_stats.tx_dropped++;
1148 }
1149 }
1150
1151 memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
1152 memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
1153}
1154
1155static void cp_free_rings (struct cp_private *cp)
1156{
1157 cp_clean_rings(cp);
1158 pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1159 cp->rx_ring = NULL;
1160 cp->tx_ring = NULL;
1161 cp->nic_stats = NULL;
1162}
1163
1164static int cp_open (struct net_device *dev)
1165{
1166 struct cp_private *cp = netdev_priv(dev);
1167 int rc;
1168
1169 if (netif_msg_ifup(cp))
1170 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1171
1172 rc = cp_alloc_rings(cp);
1173 if (rc)
1174 return rc;
1175
1176 cp_init_hw(cp);
1177
1178 rc = request_irq(dev->irq, cp_interrupt, SA_SHIRQ, dev->name, dev);
1179 if (rc)
1180 goto err_out_hw;
1181
1182 netif_carrier_off(dev);
1183 mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
1184 netif_start_queue(dev);
1185
1186 return 0;
1187
1188err_out_hw:
1189 cp_stop_hw(cp);
1190 cp_free_rings(cp);
1191 return rc;
1192}
1193
1194static int cp_close (struct net_device *dev)
1195{
1196 struct cp_private *cp = netdev_priv(dev);
1197 unsigned long flags;
1198
1199 if (netif_msg_ifdown(cp))
1200 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1201
1202 spin_lock_irqsave(&cp->lock, flags);
1203
1204 netif_stop_queue(dev);
1205 netif_carrier_off(dev);
1206
1207 cp_stop_hw(cp);
1208
1209 spin_unlock_irqrestore(&cp->lock, flags);
1210
1211 synchronize_irq(dev->irq);
1212 free_irq(dev->irq, dev);
1213
1214 cp_free_rings(cp);
1215 return 0;
1216}
1217
1218#ifdef BROKEN
1219static int cp_change_mtu(struct net_device *dev, int new_mtu)
1220{
1221 struct cp_private *cp = netdev_priv(dev);
1222 int rc;
1223 unsigned long flags;
1224
1225 /* check for invalid MTU, according to hardware limits */
1226 if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1227 return -EINVAL;
1228
1229 /* if network interface not up, no need for complexity */
1230 if (!netif_running(dev)) {
1231 dev->mtu = new_mtu;
1232 cp_set_rxbufsize(cp); /* set new rx buf size */
1233 return 0;
1234 }
1235
1236 spin_lock_irqsave(&cp->lock, flags);
1237
1238 cp_stop_hw(cp); /* stop h/w and free rings */
1239 cp_clean_rings(cp);
1240
1241 dev->mtu = new_mtu;
1242 cp_set_rxbufsize(cp); /* set new rx buf size */
1243
1244 rc = cp_init_rings(cp); /* realloc and restart h/w */
1245 cp_start_hw(cp);
1246
1247 spin_unlock_irqrestore(&cp->lock, flags);
1248
1249 return rc;
1250}
1251#endif /* BROKEN */
1252
1253static char mii_2_8139_map[8] = {
1254 BasicModeCtrl,
1255 BasicModeStatus,
1256 0,
1257 0,
1258 NWayAdvert,
1259 NWayLPAR,
1260 NWayExpansion,
1261 0
1262};
1263
1264static int mdio_read(struct net_device *dev, int phy_id, int location)
1265{
1266 struct cp_private *cp = netdev_priv(dev);
1267
1268 return location < 8 && mii_2_8139_map[location] ?
1269 readw(cp->regs + mii_2_8139_map[location]) : 0;
1270}
1271
1272
1273static void mdio_write(struct net_device *dev, int phy_id, int location,
1274 int value)
1275{
1276 struct cp_private *cp = netdev_priv(dev);
1277
1278 if (location == 0) {
1279 cpw8(Cfg9346, Cfg9346_Unlock);
1280 cpw16(BasicModeCtrl, value);
1281 cpw8(Cfg9346, Cfg9346_Lock);
1282 } else if (location < 8 && mii_2_8139_map[location])
1283 cpw16(mii_2_8139_map[location], value);
1284}
1285
1286/* Set the ethtool Wake-on-LAN settings */
1287static int netdev_set_wol (struct cp_private *cp,
1288 const struct ethtool_wolinfo *wol)
1289{
1290 u8 options;
1291
1292 options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1293 /* If WOL is being disabled, no need for complexity */
1294 if (wol->wolopts) {
1295 if (wol->wolopts & WAKE_PHY) options |= LinkUp;
1296 if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
1297 }
1298
1299 cpw8 (Cfg9346, Cfg9346_Unlock);
1300 cpw8 (Config3, options);
1301 cpw8 (Cfg9346, Cfg9346_Lock);
1302
1303 options = 0; /* Paranoia setting */
1304 options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1305 /* If WOL is being disabled, no need for complexity */
1306 if (wol->wolopts) {
1307 if (wol->wolopts & WAKE_UCAST) options |= UWF;
1308 if (wol->wolopts & WAKE_BCAST) options |= BWF;
1309 if (wol->wolopts & WAKE_MCAST) options |= MWF;
1310 }
1311
1312 cpw8 (Config5, options);
1313
1314 cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1315
1316 return 0;
1317}
1318
1319/* Get the ethtool Wake-on-LAN settings */
1320static void netdev_get_wol (struct cp_private *cp,
1321 struct ethtool_wolinfo *wol)
1322{
1323 u8 options;
1324
1325 wol->wolopts = 0; /* Start from scratch */
1326 wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
1327 WAKE_MCAST | WAKE_UCAST;
1328 /* We don't need to go on if WOL is disabled */
1329 if (!cp->wol_enabled) return;
1330
1331 options = cpr8 (Config3);
1332 if (options & LinkUp) wol->wolopts |= WAKE_PHY;
1333 if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
1334
1335 options = 0; /* Paranoia setting */
1336 options = cpr8 (Config5);
1337 if (options & UWF) wol->wolopts |= WAKE_UCAST;
1338 if (options & BWF) wol->wolopts |= WAKE_BCAST;
1339 if (options & MWF) wol->wolopts |= WAKE_MCAST;
1340}
1341
1342static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1343{
1344 struct cp_private *cp = netdev_priv(dev);
1345
1346 strcpy (info->driver, DRV_NAME);
1347 strcpy (info->version, DRV_VERSION);
1348 strcpy (info->bus_info, pci_name(cp->pdev));
1349}
1350
1351static int cp_get_regs_len(struct net_device *dev)
1352{
1353 return CP_REGS_SIZE;
1354}
1355
1356static int cp_get_stats_count (struct net_device *dev)
1357{
1358 return CP_NUM_STATS;
1359}
1360
1361static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1362{
1363 struct cp_private *cp = netdev_priv(dev);
1364 int rc;
1365 unsigned long flags;
1366
1367 spin_lock_irqsave(&cp->lock, flags);
1368 rc = mii_ethtool_gset(&cp->mii_if, cmd);
1369 spin_unlock_irqrestore(&cp->lock, flags);
1370
1371 return rc;
1372}
1373
1374static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1375{
1376 struct cp_private *cp = netdev_priv(dev);
1377 int rc;
1378 unsigned long flags;
1379
1380 spin_lock_irqsave(&cp->lock, flags);
1381 rc = mii_ethtool_sset(&cp->mii_if, cmd);
1382 spin_unlock_irqrestore(&cp->lock, flags);
1383
1384 return rc;
1385}
1386
1387static int cp_nway_reset(struct net_device *dev)
1388{
1389 struct cp_private *cp = netdev_priv(dev);
1390 return mii_nway_restart(&cp->mii_if);
1391}
1392
1393static u32 cp_get_msglevel(struct net_device *dev)
1394{
1395 struct cp_private *cp = netdev_priv(dev);
1396 return cp->msg_enable;
1397}
1398
1399static void cp_set_msglevel(struct net_device *dev, u32 value)
1400{
1401 struct cp_private *cp = netdev_priv(dev);
1402 cp->msg_enable = value;
1403}
1404
1405static u32 cp_get_rx_csum(struct net_device *dev)
1406{
1407 struct cp_private *cp = netdev_priv(dev);
1408 return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
1409}
1410
1411static int cp_set_rx_csum(struct net_device *dev, u32 data)
1412{
1413 struct cp_private *cp = netdev_priv(dev);
1414 u16 cmd = cp->cpcmd, newcmd;
1415
1416 newcmd = cmd;
1417
1418 if (data)
1419 newcmd |= RxChkSum;
1420 else
1421 newcmd &= ~RxChkSum;
1422
1423 if (newcmd != cmd) {
1424 unsigned long flags;
1425
1426 spin_lock_irqsave(&cp->lock, flags);
1427 cp->cpcmd = newcmd;
1428 cpw16_f(CpCmd, newcmd);
1429 spin_unlock_irqrestore(&cp->lock, flags);
1430 }
1431
1432 return 0;
1433}
1434
1435static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1436 void *p)
1437{
1438 struct cp_private *cp = netdev_priv(dev);
1439 unsigned long flags;
1440
1441 if (regs->len < CP_REGS_SIZE)
1442 return /* -EINVAL */;
1443
1444 regs->version = CP_REGS_VER;
1445
1446 spin_lock_irqsave(&cp->lock, flags);
1447 memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1448 spin_unlock_irqrestore(&cp->lock, flags);
1449}
1450
1451static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1452{
1453 struct cp_private *cp = netdev_priv(dev);
1454 unsigned long flags;
1455
1456 spin_lock_irqsave (&cp->lock, flags);
1457 netdev_get_wol (cp, wol);
1458 spin_unlock_irqrestore (&cp->lock, flags);
1459}
1460
1461static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1462{
1463 struct cp_private *cp = netdev_priv(dev);
1464 unsigned long flags;
1465 int rc;
1466
1467 spin_lock_irqsave (&cp->lock, flags);
1468 rc = netdev_set_wol (cp, wol);
1469 spin_unlock_irqrestore (&cp->lock, flags);
1470
1471 return rc;
1472}
1473
1474static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1475{
1476 switch (stringset) {
1477 case ETH_SS_STATS:
1478 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1479 break;
1480 default:
1481 BUG();
1482 break;
1483 }
1484}
1485
1486static void cp_get_ethtool_stats (struct net_device *dev,
1487 struct ethtool_stats *estats, u64 *tmp_stats)
1488{
1489 struct cp_private *cp = netdev_priv(dev);
1490 unsigned int work = 100;
1491 int i;
1492
1493 /* begin NIC statistics dump */
1494 cpw32(StatsAddr + 4, (cp->nic_stats_dma >> 16) >> 16);
1495 cpw32(StatsAddr, (cp->nic_stats_dma & 0xffffffff) | DumpStats);
1496 cpr32(StatsAddr);
1497
1498 while (work-- > 0) {
1499 if ((cpr32(StatsAddr) & DumpStats) == 0)
1500 break;
1501 cpu_relax();
1502 }
1503
1504 if (cpr32(StatsAddr) & DumpStats)
1505 return /* -EIO */;
1506
1507 i = 0;
1508 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_ok);
1509 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok);
1510 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_err);
1511 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_err);
1512 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->rx_fifo);
1513 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->frame_align);
1514 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_1col);
1515 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_mcol);
1516 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_phys);
1517 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_bcast);
1518 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_ok_mcast);
1519 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_abort);
1520 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_underrun);
1521 tmp_stats[i++] = cp->cp_stats.rx_frags;
1522 if (i != CP_NUM_STATS)
1523 BUG();
1524}
1525
1526static struct ethtool_ops cp_ethtool_ops = {
1527 .get_drvinfo = cp_get_drvinfo,
1528 .get_regs_len = cp_get_regs_len,
1529 .get_stats_count = cp_get_stats_count,
1530 .get_settings = cp_get_settings,
1531 .set_settings = cp_set_settings,
1532 .nway_reset = cp_nway_reset,
1533 .get_link = ethtool_op_get_link,
1534 .get_msglevel = cp_get_msglevel,
1535 .set_msglevel = cp_set_msglevel,
1536 .get_rx_csum = cp_get_rx_csum,
1537 .set_rx_csum = cp_set_rx_csum,
1538 .get_tx_csum = ethtool_op_get_tx_csum,
1539 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
1540 .get_sg = ethtool_op_get_sg,
1541 .set_sg = ethtool_op_set_sg,
1542 .get_regs = cp_get_regs,
1543 .get_wol = cp_get_wol,
1544 .set_wol = cp_set_wol,
1545 .get_strings = cp_get_strings,
1546 .get_ethtool_stats = cp_get_ethtool_stats,
1547};
1548
1549static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1550{
1551 struct cp_private *cp = netdev_priv(dev);
1552 int rc;
1553 unsigned long flags;
1554
1555 if (!netif_running(dev))
1556 return -EINVAL;
1557
1558 spin_lock_irqsave(&cp->lock, flags);
1559 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1560 spin_unlock_irqrestore(&cp->lock, flags);
1561 return rc;
1562}
1563
1564/* Serial EEPROM section. */
1565
1566/* EEPROM_Ctrl bits. */
1567#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
1568#define EE_CS 0x08 /* EEPROM chip select. */
1569#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
1570#define EE_WRITE_0 0x00
1571#define EE_WRITE_1 0x02
1572#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
1573#define EE_ENB (0x80 | EE_CS)
1574
/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
 */
1578
1579#define eeprom_delay() readl(ee_addr)
1580
/* The EEPROM commands include the always-set leading bit. */
1582#define EE_WRITE_CMD (5)
1583#define EE_READ_CMD (6)
1584#define EE_ERASE_CMD (7)
1585
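/* Bit-bang one 16-bit word out of the serial EEPROM via the Cfg9346
 * register: clock out the read command and address bits, then clock in the
 * 16 data bits.  addr_len is 6 for the common 93C46 part and 8 for larger
 * EEPROMs.
 */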
1586static int read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1587{
1588 int i;
1589 unsigned retval = 0;
1590 void __iomem *ee_addr = ioaddr + Cfg9346;
1591 int read_cmd = location | (EE_READ_CMD << addr_len);
1592
1593 writeb (EE_ENB & ~EE_CS, ee_addr);
1594 writeb (EE_ENB, ee_addr);
1595 eeprom_delay ();
1596
1597 /* Shift the read command bits out. */
1598 for (i = 4 + addr_len; i >= 0; i--) {
1599 int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1600 writeb (EE_ENB | dataval, ee_addr);
1601 eeprom_delay ();
1602 writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1603 eeprom_delay ();
1604 }
1605 writeb (EE_ENB, ee_addr);
1606 eeprom_delay ();
1607
1608 for (i = 16; i > 0; i--) {
1609 writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1610 eeprom_delay ();
1611 retval =
1612 (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1613 0);
1614 writeb (EE_ENB, ee_addr);
1615 eeprom_delay ();
1616 }
1617
1618 /* Terminate the EEPROM access. */
1619 writeb (~EE_CS, ee_addr);
1620 eeprom_delay ();
1621
1622 return retval;
1623}
1624
1625/* Put the board into D3cold state and wait for WakeUp signal */
1626static void cp_set_d3_state (struct cp_private *cp)
1627{
1628 pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
1629 pci_set_power_state (cp->pdev, PCI_D3hot);
1630}
1631
1632static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1633{
1634 struct net_device *dev;
1635 struct cp_private *cp;
1636 int rc;
1637 void __iomem *regs;
1638 long pciaddr;
1639 unsigned int addr_len, i, pci_using_dac;
1640 u8 pci_rev;
1641
1642#ifndef MODULE
1643 static int version_printed;
1644 if (version_printed++ == 0)
1645 printk("%s", version);
1646#endif
1647
1648 pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
1649
1650 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1651 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
1652 printk(KERN_ERR PFX "pci dev %s (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
1653 pci_name(pdev), pdev->vendor, pdev->device, pci_rev);
1654 printk(KERN_ERR PFX "Try the \"8139too\" driver instead.\n");
1655 return -ENODEV;
1656 }
1657
1658 dev = alloc_etherdev(sizeof(struct cp_private));
1659 if (!dev)
1660 return -ENOMEM;
1661 SET_MODULE_OWNER(dev);
1662 SET_NETDEV_DEV(dev, &pdev->dev);
1663
1664 cp = netdev_priv(dev);
1665 cp->pdev = pdev;
1666 cp->dev = dev;
1667 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1668 spin_lock_init (&cp->lock);
1669 cp->mii_if.dev = dev;
1670 cp->mii_if.mdio_read = mdio_read;
1671 cp->mii_if.mdio_write = mdio_write;
1672 cp->mii_if.phy_id = CP_INTERNAL_PHY;
1673 cp->mii_if.phy_id_mask = 0x1f;
1674 cp->mii_if.reg_num_mask = 0x1f;
1675 cp_set_rxbufsize(cp);
1676
1677 rc = pci_enable_device(pdev);
1678 if (rc)
1679 goto err_out_free;
1680
1681 rc = pci_set_mwi(pdev);
1682 if (rc)
1683 goto err_out_disable;
1684
1685 rc = pci_request_regions(pdev, DRV_NAME);
1686 if (rc)
1687 goto err_out_mwi;
1688
1689 pciaddr = pci_resource_start(pdev, 1);
1690 if (!pciaddr) {
1691 rc = -EIO;
1692 printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
1693 pci_name(pdev));
1694 goto err_out_res;
1695 }
1696 if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1697 rc = -EIO;
1698 printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
1699 pci_resource_len(pdev, 1), pci_name(pdev));
1700 goto err_out_res;
1701 }
1702
1703 /* Configure DMA attributes. */
1704 if ((sizeof(dma_addr_t) > 4) &&
Tobias Klauser8662d062005-05-12 22:19:39 -04001705 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
1706 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 pci_using_dac = 1;
1708 } else {
1709 pci_using_dac = 0;
1710
Tobias Klauser8662d062005-05-12 22:19:39 -04001711 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 if (rc) {
1713 printk(KERN_ERR PFX "No usable DMA configuration, "
1714 "aborting.\n");
1715 goto err_out_res;
1716 }
Tobias Klauser8662d062005-05-12 22:19:39 -04001717 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 if (rc) {
1719 printk(KERN_ERR PFX "No usable consistent DMA configuration, "
1720 "aborting.\n");
1721 goto err_out_res;
1722 }
1723 }
1724
1725 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1726 PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1727
1728 regs = ioremap(pciaddr, CP_REGS_SIZE);
1729 if (!regs) {
1730 rc = -EIO;
1731 printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
1732 pci_resource_len(pdev, 1), pciaddr, pci_name(pdev));
1733 goto err_out_res;
1734 }
1735 dev->base_addr = (unsigned long) regs;
1736 cp->regs = regs;
1737
1738 cp_stop_hw(cp);
1739
1740 /* read MAC address from EEPROM */
1741 addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1742 for (i = 0; i < 3; i++)
1743 ((u16 *) (dev->dev_addr))[i] =
1744 le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
1745
1746 dev->open = cp_open;
1747 dev->stop = cp_close;
1748 dev->set_multicast_list = cp_set_rx_mode;
1749 dev->hard_start_xmit = cp_start_xmit;
1750 dev->get_stats = cp_get_stats;
1751 dev->do_ioctl = cp_ioctl;
1752 dev->poll = cp_rx_poll;
1753 dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. */
1754#ifdef BROKEN
1755 dev->change_mtu = cp_change_mtu;
1756#endif
1757 dev->ethtool_ops = &cp_ethtool_ops;
1758#if 0
1759 dev->tx_timeout = cp_tx_timeout;
1760 dev->watchdog_timeo = TX_TIMEOUT;
1761#endif
1762
1763#if CP_VLAN_TAG_USED
1764 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1765 dev->vlan_rx_register = cp_vlan_rx_register;
1766 dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
1767#endif
1768
1769 if (pci_using_dac)
1770 dev->features |= NETIF_F_HIGHDMA;
1771
1772 dev->irq = pdev->irq;
1773
1774 rc = register_netdev(dev);
1775 if (rc)
1776 goto err_out_iomap;
1777
1778 printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
1779 "%02x:%02x:%02x:%02x:%02x:%02x, "
1780 "IRQ %d\n",
1781 dev->name,
1782 dev->base_addr,
1783 dev->dev_addr[0], dev->dev_addr[1],
1784 dev->dev_addr[2], dev->dev_addr[3],
1785 dev->dev_addr[4], dev->dev_addr[5],
1786 dev->irq);
1787
1788 pci_set_drvdata(pdev, dev);
1789
1790 /* enable busmastering and memory-write-invalidate */
1791 pci_set_master(pdev);
1792
1793 if (cp->wol_enabled) cp_set_d3_state (cp);
1794
1795 return 0;
1796
1797err_out_iomap:
1798 iounmap(regs);
1799err_out_res:
1800 pci_release_regions(pdev);
1801err_out_mwi:
1802 pci_clear_mwi(pdev);
1803err_out_disable:
1804 pci_disable_device(pdev);
1805err_out_free:
1806 free_netdev(dev);
1807 return rc;
1808}
1809
1810static void cp_remove_one (struct pci_dev *pdev)
1811{
1812 struct net_device *dev = pci_get_drvdata(pdev);
1813 struct cp_private *cp = netdev_priv(dev);
1814
1815 if (!dev)
1816 BUG();
1817 unregister_netdev(dev);
1818 iounmap(cp->regs);
1819 if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0);
1820 pci_release_regions(pdev);
1821 pci_clear_mwi(pdev);
1822 pci_disable_device(pdev);
1823 pci_set_drvdata(pdev, NULL);
1824 free_netdev(dev);
1825}
1826
1827#ifdef CONFIG_PM
Pavel Machek05adc3b2005-04-16 15:25:25 -07001828static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829{
1830 struct net_device *dev;
1831 struct cp_private *cp;
1832 unsigned long flags;
1833
1834 dev = pci_get_drvdata (pdev);
1835 cp = netdev_priv(dev);
1836
1837 if (!dev || !netif_running (dev)) return 0;
1838
1839 netif_device_detach (dev);
1840 netif_stop_queue (dev);
1841
1842 spin_lock_irqsave (&cp->lock, flags);
1843
1844 /* Disable Rx and Tx */
1845 cpw16 (IntrMask, 0);
1846 cpw8 (Cmd, cpr8 (Cmd) & (~RxOn | ~TxOn));
1847
1848 spin_unlock_irqrestore (&cp->lock, flags);
1849
1850 if (cp->pdev && cp->wol_enabled) {
1851 pci_save_state (cp->pdev);
1852 cp_set_d3_state (cp);
1853 }
1854
1855 return 0;
1856}
1857
1858static int cp_resume (struct pci_dev *pdev)
1859{
1860 struct net_device *dev;
1861 struct cp_private *cp;
1862
1863 dev = pci_get_drvdata (pdev);
1864 cp = netdev_priv(dev);
1865
1866 netif_device_attach (dev);
1867
1868 if (cp->pdev && cp->wol_enabled) {
1869 pci_set_power_state (cp->pdev, PCI_D0);
1870 pci_restore_state (cp->pdev);
1871 }
1872
1873 cp_init_hw (cp);
1874 netif_start_queue (dev);
1875
1876 return 0;
1877}
1878#endif /* CONFIG_PM */
1879
1880static struct pci_driver cp_driver = {
1881 .name = DRV_NAME,
1882 .id_table = cp_pci_tbl,
1883 .probe = cp_init_one,
1884 .remove = cp_remove_one,
1885#ifdef CONFIG_PM
1886 .resume = cp_resume,
1887 .suspend = cp_suspend,
1888#endif
1889};
1890
1891static int __init cp_init (void)
1892{
1893#ifdef MODULE
1894 printk("%s", version);
1895#endif
1896 return pci_module_init (&cp_driver);
1897}
1898
1899static void __exit cp_exit (void)
1900{
1901 pci_unregister_driver (&cp_driver);
1902}
1903
1904module_init(cp_init);
1905module_exit(cp_exit);