/*
 * This code is derived from the VIA reference driver (copyright message
 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
 * addition to the Linux kernel.
 *
 * The code has been merged into one source file, cleaned up to follow
 * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
 * for 64bit hardware platforms.
 *
 * TODO
 *	Big-endian support
 *	rx_copybreak/alignment
 *	Scatter gather
 *	More testing
 *
 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@redhat.com>
 * Additional fixes and clean up: Francois Romieu
 *
 * This source has not been verified for use in safety critical systems.
 *
 * Please direct queries about the revamped driver to the linux-kernel
 * list not VIA.
 *
 * Original code:
 *
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This software may be redistributed and/or modified under
 * the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Author: Chuang Liang-Shing, AJ Jiang
 *
 * Date: Jan 24, 2003
 *
 * MODULE_LICENSE("GPL");
 *
 */


#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <asm/io.h>
#include <linux/if.h>
#include <asm/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/inetdevice.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/crc-ccitt.h>
#include <linux/crc32.h>

#include "via-velocity.h"


static int velocity_nics = 0;
static int msglevel = MSG_LEVEL_INFO;

/**
 * mac_get_cam_mask - Read a CAM mask
 * @regs: register block for this velocity
 * @mask: buffer to store mask
 *
 * Fetch the mask bits of the selected CAM and store them into the
 * provided mask buffer.
 */

static void mac_get_cam_mask(struct mac_regs __iomem * regs, u8 * mask)
{
        int i;

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(0, &regs->CAMADDR);

        /* read mask */
        for (i = 0; i < 8; i++)
                *mask++ = readb(&(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

}


/**
 * mac_set_cam_mask - Set a CAM mask
 * @regs: register block for this velocity
 * @mask: CAM mask to load
 *
 * Store a new mask into a CAM
 */

static void mac_set_cam_mask(struct mac_regs __iomem * regs, u8 * mask)
{
        int i;
        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN, &regs->CAMADDR);

        for (i = 0; i < 8; i++) {
                writeb(*mask++, &(regs->MARCAM[i]));
        }
        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam_mask(struct mac_regs __iomem * regs, u8 * mask)
{
        int i;
        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);

        for (i = 0; i < 8; i++) {
                writeb(*mask++, &(regs->MARCAM[i]));
        }
        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 * mac_set_cam - set CAM data
 * @regs: register block of this velocity
 * @idx: Cam index
 * @addr: 2 or 6 bytes of CAM data
 *
 * Load an address or vlan tag into a CAM
 */

static void mac_set_cam(struct mac_regs __iomem * regs, int idx, const u8 *addr)
{
        int i;

        /* Select CAM data */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);

        for (i = 0; i < 6; i++) {
                writeb(*addr++, &(regs->MARCAM[i]));
        }
        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam(struct mac_regs __iomem * regs, int idx,
                             const u8 *addr)
{

        /* Select CAM data */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
        writew(*((u16 *) addr), &regs->MARCAM[0]);

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}


/**
 * mac_wol_reset - reset WOL after exiting low power
 * @regs: register block of this velocity
 *
 * Called after we drop out of wake on lan mode in order to
 * reset the Wake on lan features. This function doesn't restore
 * the rest of the chip state lost across the sleep/wakeup cycle.
 */

static void mac_wol_reset(struct mac_regs __iomem * regs)
{

        /* Turn off SWPTAG right after leaving power mode */
        BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
        /* clear sticky bits */
        BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

        BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
        BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
        /* disable force PME-enable */
        writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
        /* disable power-event config bit */
        writew(0xFFFF, &regs->WOLCRClr);
        /* clear power status */
        writew(0xFFFF, &regs->WOLSRClr);
}

static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct ethtool_ops velocity_ethtool_ops;

/*
    Define module options
*/

MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");

#define VELOCITY_PARAM(N,D) \
        static int N[MAX_UNITS]=OPTION_DEFAULT;\
        module_param_array(N, int, NULL, 0); \
        MODULE_PARM_DESC(N, D);
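
/*
 * For reference, each VELOCITY_PARAM() use below expands to a per-unit
 * option array plus its module metadata. For example,
 * VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors")
 * becomes:
 *
 *	static int RxDescriptors[MAX_UNITS] = OPTION_DEFAULT;
 *	module_param_array(RxDescriptors, int, NULL, 0);
 *	MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
 */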

#define RX_DESC_MIN     64
#define RX_DESC_MAX     255
#define RX_DESC_DEF     64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN     16
#define TX_DESC_MAX     256
#define TX_DESC_DEF     64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");

#define RX_THRESH_MIN   0
#define RX_THRESH_MAX   3
#define RX_THRESH_DEF   0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: indicates the rxfifo threshold is 128 bytes.
   1: indicates the rxfifo threshold is 512 bytes.
   2: indicates the rxfifo threshold is 1024 bytes.
   3: indicates the rxfifo threshold is store & forward.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
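/*
 * For example, loading the driver with "modprobe via-velocity rx_thresh=3"
 * should select store-and-forward operation on every NIC this driver claims.
 */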

#define DMA_LENGTH_MIN  0
#define DMA_LENGTH_MAX  7
#define DMA_LENGTH_DEF  0

/* DMA_length[] is used for controlling the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF(flush till empty)
   7: SF(flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");
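/*
 * A DWORD is 4 bytes here, so DMA_length=3 caps each DMA burst at
 * 64 DWORDs (256 bytes); 6 and 7 both select store-and-forward
 * (flush until empty).
 */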

#define IP_ALIG_DEF     0
/* IP_byte_align[] is used for IP header DWORD byte alignment
   0: indicates the IP header won't be DWORD byte aligned. (Default)
   1: indicates the IP header will be DWORD byte aligned.
   In some environments the IP header must be DWORD byte aligned,
   or the packet will be dropped when we receive it. (eg: IPVS)
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");

#define TX_CSUM_DEF     1
/* txcsum_offload[] is used for setting the checksum offload ability of NIC.
   (We only support RX checksum offload now)
   0: disable checksum offload
   1: enable checksum offload. (Default)
*/
VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload");

#define FLOW_CNTL_DEF   1
#define FLOW_CNTL_MIN   1
#define FLOW_CNTL_MAX   5

/* flow_control[] is used for setting the flow control ability of NIC.
   1: hardware default - AUTO (default). Use the hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");

#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 4
/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
   0: indicates autonegotiation for both speed and duplex mode
   1: indicates 100Mbps half duplex mode
   2: indicates 100Mbps full duplex mode
   3: indicates 10Mbps half duplex mode
   4: indicates 10Mbps full duplex mode

   Note:
   if the EEPROM has been set to the force mode, this option is ignored
   by the driver.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define VAL_PKT_LEN_DEF     0
/* ValPktLen[] is used to control handling of frames with an invalid
   layer 2 length.
   0: Receive frames with invalid layer 2 length (Default)
   1: Drop frames with invalid layer 2 length
*/
VELOCITY_PARAM(ValPktLen, "Receive or drop frames with invalid 802.3 length");

#define WOL_OPT_DEF     0
#define WOL_OPT_MIN     0
#define WOL_OPT_MAX     7
/* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if a magic packet is received. (Default)
   1: Wake up if the link status changes on/off.
   2: Wake up if an arp packet is received.
   4: Wake up if any unicast packet is received.
   These values can be summed to enable more than one option.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");
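/*
 * For example, "modprobe via-velocity wol_opts=6" (2 + 4) should arm
 * wake-up on either an ARP packet or any unicast packet.
 */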

#define INT_WORKS_DEF   20
#define INT_WORKS_MIN   10
#define INT_WORKS_MAX   64

VELOCITY_PARAM(int_works, "Number of packets handled per interrupt service");

static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
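/*
 * With the default rx_copybreak of 200, received frames shorter than
 * 200 bytes are copied into a freshly allocated skb (see
 * velocity_rx_copy) so that the original full-sized receive buffer can
 * be recycled to the hardware.
 */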

static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
                               const struct velocity_info_tbl *info);
static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev);
static void velocity_print_info(struct velocity_info *vptr);
static int velocity_open(struct net_device *dev);
static int velocity_change_mtu(struct net_device *dev, int mtu);
static int velocity_xmit(struct sk_buff *skb, struct net_device *dev);
static int velocity_intr(int irq, void *dev_instance);
static void velocity_set_multi(struct net_device *dev);
static struct net_device_stats *velocity_get_stats(struct net_device *dev);
static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int velocity_close(struct net_device *dev);
static int velocity_receive_frame(struct velocity_info *, int idx);
static int velocity_alloc_rx_buf(struct velocity_info *, int idx);
static void velocity_free_rd_ring(struct velocity_info *vptr);
static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *);
static int velocity_soft_reset(struct velocity_info *vptr);
static void mii_init(struct velocity_info *vptr, u32 mii_status);
static u32 velocity_get_link(struct net_device *dev);
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr);
static void velocity_print_link_status(struct velocity_info *vptr);
static void safe_disable_mii_autopoll(struct mac_regs __iomem * regs);
static void velocity_shutdown(struct velocity_info *vptr);
static void enable_flow_control_ability(struct velocity_info *vptr);
static void enable_mii_autopoll(struct mac_regs __iomem * regs);
static int velocity_mii_read(struct mac_regs __iomem *, u8 byIdx, u16 * pdata);
static int velocity_mii_write(struct mac_regs __iomem *, u8 byMiiAddr, u16 data);
static u32 mii_check_media_mode(struct mac_regs __iomem * regs);
static u32 check_connection_type(struct mac_regs __iomem * regs);
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status);

#ifdef CONFIG_PM

static int velocity_suspend(struct pci_dev *pdev, pm_message_t state);
static int velocity_resume(struct pci_dev *pdev);

static DEFINE_SPINLOCK(velocity_dev_list_lock);
static LIST_HEAD(velocity_dev_list);

#endif

#if defined(CONFIG_PM) && defined(CONFIG_INET)

static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr);

static struct notifier_block velocity_inetaddr_notifier = {
        .notifier_call = velocity_netdev_event,
};

static void velocity_register_notifier(void)
{
        register_inetaddr_notifier(&velocity_inetaddr_notifier);
}

static void velocity_unregister_notifier(void)
{
        unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
}

#else

#define velocity_register_notifier()    do {} while (0)
#define velocity_unregister_notifier()  do {} while (0)

#endif

/*
 * Internal board variants. At the moment we have only one
 */

static const struct velocity_info_tbl chip_info_table[] __devinitdata = {
        {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
        { }
};

/*
 * Describe the PCI device identifiers that we support in this
 * device driver. Used for hotplug autoloading.
 */

static const struct pci_device_id velocity_id_table[] __devinitdata = {
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
        { }
};

MODULE_DEVICE_TABLE(pci, velocity_id_table);

/**
 * get_chip_name - identifier to name
 * @chip_id: chip identifier
 *
 * Given a chip identifier return a suitable description. Returns
 * a pointer to a static string valid while the driver is loaded.
 */

static const char __devinit *get_chip_name(enum chip_type chip_id)
{
        int i;
        for (i = 0; chip_info_table[i].name != NULL; i++)
                if (chip_info_table[i].chip_id == chip_id)
                        break;
        return chip_info_table[i].name;
}

/**
 * velocity_remove1 - device unplug
 * @pdev: PCI device being removed
 *
 * Device unload callback. Called on an unplug or on module
 * unload for each active device that is present. Disconnects
 * the device from the network layer and frees all the resources.
 */

static void __devexit velocity_remove1(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct velocity_info *vptr = netdev_priv(dev);

#ifdef CONFIG_PM
        unsigned long flags;

        spin_lock_irqsave(&velocity_dev_list_lock, flags);
        if (!list_empty(&velocity_dev_list))
                list_del(&vptr->list);
        spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
#endif
        unregister_netdev(dev);
        iounmap(vptr->mac_regs);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);

        velocity_nics--;
}

/**
 * velocity_set_int_opt - parser for integer options
 * @opt: pointer to option value
 * @val: value the user requested (or -1 for default)
 * @min: lowest value allowed
 * @max: highest value allowed
 * @def: default value
 * @name: property name
 * @devname: device name
 *
 * Set an integer property in the module options. This function does
 * all the verification and checking as well as reporting so that
 * we don't duplicate code for each option.
 */

static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, char *devname)
{
        if (val == -1)
                *opt = def;
        else if (val < min || val > max) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
                       devname, name, min, max);
                *opt = def;
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
                       devname, name, val);
                *opt = val;
        }
}

/**
 * velocity_set_bool_opt - parser for boolean options
 * @opt: pointer to option value
 * @val: value the user requested (or -1 for default)
 * @def: default value (yes/no)
 * @flag: numeric value to set for true.
 * @name: property name
 * @devname: device name
 *
 * Set a boolean property in the module options. This function does
 * all the verification and checking as well as reporting so that
 * we don't duplicate code for each option.
 */

static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag, char *name, char *devname)
{
        (*opt) &= (~flag);
        if (val == -1)
                *opt |= (def ? flag : 0);
        else if (val < 0 || val > 1) {
                printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
                        devname, name);
                *opt |= (def ? flag : 0);
        } else {
                printk(KERN_INFO "%s: set parameter %s to %s\n",
                        devname, name, val ? "TRUE" : "FALSE");
                *opt |= (val ? flag : 0);
        }
}

/**
 * velocity_get_options - set options on device
 * @opts: option structure for the device
 * @index: index of option to use in module options array
 * @devname: device name
 *
 * Turn the module and command options into a single structure
 * for the current device
 */

static void __devinit velocity_get_options(struct velocity_opt *opts, int index, char *devname)
{

        velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
        velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
        velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
        velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);

        velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname);
        velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
        velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
        velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
        velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
        velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
        velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname);
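        /*
         * The receive ring is handed back to the NIC in blocks of four
         * descriptors (see velocity_give_many_rx_descs), so round the
         * ring size down to a multiple of 4.
         */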
        opts->numrx = (opts->numrx & ~3);
}

/**
 * velocity_init_cam_filter - initialise CAM
 * @vptr: velocity to program
 *
 * Initialize the content addressable memory used for filters. Load
 * appropriately according to the presence of VLAN
 */

static void velocity_init_cam_filter(struct velocity_info *vptr)
{
        struct mac_regs __iomem * regs = vptr->mac_regs;
        unsigned short vid;

        /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
        WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
        WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

        /* Disable all CAMs */
        memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
        memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
        mac_set_cam_mask(regs, vptr->mCAMmask);

        /* Enable first VCAM */
        if (vptr->vlgrp) {
                for (vid = 0; vid < VLAN_VID_MASK; vid++) {
                        if (vlan_group_get_device(vptr->vlgrp, vid)) {
                                /* If the Tagging option is enabled and the
                                   VLAN ID is not zero, then
                                   turn on MCFG_RTGOPT also */
                                if (vid != 0)
                                        WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);

                                mac_set_vlan_cam(regs, 0, (u8 *) &vid);
                        }
                }
                vptr->vCAMmask[0] |= 1;
                mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
        } else {
                u16 temp = 0;
                mac_set_vlan_cam(regs, 0, (u8 *) &temp);
                temp = 1;
                mac_set_vlan_cam_mask(regs, (u8 *) &temp);
        }
}

static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
}

static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        vlan_group_set_device(vptr->vlgrp, vid, NULL);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
}


/**
 * velocity_rx_reset - handle a receive reset
 * @vptr: velocity we are resetting
 *
 * Reset the ownership and status for the receive ring side.
 * Hand all the receive queue to the NIC.
 */

static void velocity_rx_reset(struct velocity_info *vptr)
{

        struct mac_regs __iomem * regs = vptr->mac_regs;
        int i;

        vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;

        /*
         * Init state, all RD entries belong to the NIC
         */
        for (i = 0; i < vptr->options.numrx; ++i)
                vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;

        writew(vptr->options.numrx, &regs->RBRDU);
        writel(vptr->rd_pool_dma, &regs->RDBaseLo);
        writew(0, &regs->RDIdx);
        writew(vptr->options.numrx - 1, &regs->RDCSize);
}

/**
 * velocity_init_registers - initialise MAC registers
 * @vptr: velocity to init
 * @type: type of initialisation (hot or cold)
 *
 * Initialise the MAC on a reset or on first set up on the
 * hardware.
 */

static void velocity_init_registers(struct velocity_info *vptr,
                                    enum velocity_init_type type)
{
        struct mac_regs __iomem * regs = vptr->mac_regs;
        int i, mii_status;

        mac_wol_reset(regs);

        switch (type) {
        case VELOCITY_INIT_RESET:
        case VELOCITY_INIT_WOL:

                netif_stop_queue(vptr->dev);

                /*
                 * Reset RX to keep the RX descriptor pointer on a
                 * multiple-of-4 boundary
                 */
                velocity_rx_reset(vptr);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                mii_status = velocity_get_opt_media_mode(vptr);
                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(vptr->dev);
                }

                enable_flow_control_ability(vptr);

                mac_clear_isr(regs);
                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
                        &regs->CR0Set);

                break;

        case VELOCITY_INIT_COLD:
        default:
                /*
                 * Do reset
                 */
                velocity_soft_reset(vptr);
                mdelay(5);

                mac_eeprom_reload(regs);
                for (i = 0; i < 6; i++) {
                        writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
                }
                /*
                 * clear Pre_ACPI bit.
                 */
                BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
                mac_set_rx_thresh(regs, vptr->options.rx_thresh);
                mac_set_dma_length(regs, vptr->options.DMA_length);

                writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
                /*
                 * Use the original IEEE standard back-off algorithm
                 */
                BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);

                /*
                 * Init CAM filter
                 */
                velocity_init_cam_filter(vptr);

                /*
                 * Set packet filter: Receive directed and broadcast address
                 */
                velocity_set_multi(vptr->dev);

                /*
                 * Enable MII auto-polling
                 */
                enable_mii_autopoll(regs);

                vptr->int_mask = INT_MASK_DEF;

                writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo);
                writew(vptr->options.numrx - 1, &regs->RDCSize);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                writew(vptr->options.numtx - 1, &regs->TDCSize);

                for (i = 0; i < vptr->num_txq; i++) {
                        writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i]));
                        mac_tx_queue_run(regs, i);
                }

                init_flow_control_register(vptr);

                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);

                mii_status = velocity_get_opt_media_mode(vptr);
                netif_stop_queue(vptr->dev);

                mii_init(vptr, mii_status);

                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(vptr->dev);
                }

                enable_flow_control_ability(vptr);
                mac_hw_mibs_init(regs);
                mac_write_int_mask(vptr->int_mask, regs);
                mac_clear_isr(regs);

        }
}

/**
 * velocity_soft_reset - soft reset
 * @vptr: velocity to reset
 *
 * Kick off a soft reset of the velocity adapter and then poll
 * until the reset sequence has completed before returning.
 */

static int velocity_soft_reset(struct velocity_info *vptr)
{
        struct mac_regs __iomem * regs = vptr->mac_regs;
        int i = 0;

        writel(CR0_SFRST, &regs->CR0Set);

        for (i = 0; i < W_MAX_TIMEOUT; i++) {
                udelay(5);
                if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
                        break;
        }

        if (i == W_MAX_TIMEOUT) {
                writel(CR0_FORSRST, &regs->CR0Set);
                /* FIXME: PCI POSTING */
                /* delay 2ms */
                mdelay(2);
        }
        return 0;
}

/**
 * velocity_found1 - set up discovered velocity card
 * @pdev: PCI device
 * @ent: PCI device table entry that matched
 *
 * Configure a discovered adapter from scratch. Return a negative
 * errno error code on failure paths.
 */

static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int first = 1;
        struct net_device *dev;
        int i;
        const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
        struct velocity_info *vptr;
        struct mac_regs __iomem * regs;
        int ret = -ENOMEM;

        /* FIXME: this driver, like almost all other ethernet drivers,
         * can support more than MAX_UNITS.
         */
        if (velocity_nics >= MAX_UNITS) {
                dev_notice(&pdev->dev, "already found %d NICs.\n",
                           velocity_nics);
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(struct velocity_info));
        if (!dev) {
                dev_err(&pdev->dev, "allocate net device failed.\n");
                goto out;
        }

        /* Chain it all together */

        SET_NETDEV_DEV(dev, &pdev->dev);
        vptr = netdev_priv(dev);


        if (first) {
                printk(KERN_INFO "%s Ver. %s\n",
                        VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
                printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
                printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
                first = 0;
        }

        velocity_init_info(pdev, vptr, info);

        vptr->dev = dev;

        dev->irq = pdev->irq;

        ret = pci_enable_device(pdev);
        if (ret < 0)
                goto err_free_dev;

        ret = velocity_get_pci_info(vptr, pdev);
        if (ret < 0) {
                /* error message already printed */
                goto err_disable;
        }

        ret = pci_request_regions(pdev, VELOCITY_NAME);
        if (ret < 0) {
                dev_err(&pdev->dev, "No PCI resources.\n");
                goto err_disable;
        }

        regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
        if (regs == NULL) {
                ret = -EIO;
                goto err_release_res;
        }

        vptr->mac_regs = regs;

        mac_wol_reset(regs);

        dev->base_addr = vptr->ioaddr;

        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = readb(&regs->PAR[i]);


        velocity_get_options(&vptr->options, velocity_nics, dev->name);

        /*
         * Mask out the options that cannot be set on this chip
         */

        vptr->options.flags &= info->flags;

        /*
         * Enable the chip specified capabilities
         */

        vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);

        vptr->wol_opts = vptr->options.wol_opts;
        vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;

        vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);

        dev->irq = pdev->irq;
        dev->open = velocity_open;
        dev->hard_start_xmit = velocity_xmit;
        dev->stop = velocity_close;
        dev->get_stats = velocity_get_stats;
        dev->set_multicast_list = velocity_set_multi;
        dev->do_ioctl = velocity_ioctl;
        dev->ethtool_ops = &velocity_ethtool_ops;
        dev->change_mtu = velocity_change_mtu;

        dev->vlan_rx_add_vid = velocity_vlan_rx_add_vid;
        dev->vlan_rx_kill_vid = velocity_vlan_rx_kill_vid;

#ifdef VELOCITY_ZERO_COPY_SUPPORT
        dev->features |= NETIF_F_SG;
#endif
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER;

        if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
                dev->features |= NETIF_F_IP_CSUM;

        ret = register_netdev(dev);
        if (ret < 0)
                goto err_iounmap;

        if (velocity_get_link(dev))
                netif_carrier_off(dev);

        velocity_print_info(vptr);
        pci_set_drvdata(pdev, dev);

        /* and leave the chip powered down */

        pci_set_power_state(pdev, PCI_D3hot);
#ifdef CONFIG_PM
        {
                unsigned long flags;

                spin_lock_irqsave(&velocity_dev_list_lock, flags);
                list_add(&vptr->list, &velocity_dev_list);
                spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
        }
#endif
        velocity_nics++;
out:
        return ret;

err_iounmap:
        iounmap(regs);
err_release_res:
        pci_release_regions(pdev);
err_disable:
        pci_disable_device(pdev);
err_free_dev:
        free_netdev(dev);
        goto out;
}

/**
 * velocity_print_info - per driver data
 * @vptr: velocity
 *
 * Print per driver data as the kernel driver finds Velocity
 * hardware
 */

static void __devinit velocity_print_info(struct velocity_info *vptr)
{
        struct net_device *dev = vptr->dev;

        printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
        printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
                dev->name,
                dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
                dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
}

/**
 * velocity_init_info - init private data
 * @pdev: PCI device
 * @vptr: Velocity info
 * @info: Board type
 *
 * Set up the initial velocity_info struct for the device that has been
 * discovered.
 */

static void __devinit velocity_init_info(struct pci_dev *pdev,
                                         struct velocity_info *vptr,
                                         const struct velocity_info_tbl *info)
{
        memset(vptr, 0, sizeof(struct velocity_info));

        vptr->pdev = pdev;
        vptr->chip_id = info->chip_id;
        vptr->num_txq = info->txqueue;
        vptr->multicast_limit = MCAM_SIZE;
        spin_lock_init(&vptr->lock);
        INIT_LIST_HEAD(&vptr->list);
}

/**
 * velocity_get_pci_info - retrieve PCI info for device
 * @vptr: velocity device
 * @pdev: PCI device it matches
 *
 * Retrieve the PCI configuration space data that interests us from
 * the kernel PCI layer
 */

static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
{
        vptr->rev_id = pdev->revision;

        pci_set_master(pdev);

        vptr->ioaddr = pci_resource_start(pdev, 0);
        vptr->memaddr = pci_resource_start(pdev, 1);

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
                dev_err(&pdev->dev,
                           "region #0 is not an I/O resource, aborting.\n");
                return -EINVAL;
        }

        if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
                dev_err(&pdev->dev,
                           "region #1 is an I/O resource, aborting.\n");
                return -EINVAL;
        }

        if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
                dev_err(&pdev->dev, "region #1 is too small.\n");
                return -EINVAL;
        }
        vptr->pdev = pdev;

        return 0;
}

/**
 * velocity_init_rings - set up DMA rings
 * @vptr: Velocity to set up
 *
 * Allocate PCI mapped DMA rings for the receive and transmit layer
 * to use.
 */

static int velocity_init_rings(struct velocity_info *vptr)
{
        int i;
        unsigned int psize;
        unsigned int tsize;
        dma_addr_t pool_dma;
        u8 *pool;

        /*
         * Allocate all RD/TD rings as a single pool
         */

        psize = vptr->options.numrx * sizeof(struct rx_desc) +
                vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;

        /*
         * pci_alloc_consistent() fulfills the requirement for 64 byte
         * alignment
         */
        pool = pci_alloc_consistent(vptr->pdev, psize, &pool_dma);

        if (pool == NULL) {
                printk(KERN_ERR "%s : DMA memory allocation failed.\n",
                        vptr->dev->name);
                return -ENOMEM;
        }

        memset(pool, 0, psize);

        vptr->rd_ring = (struct rx_desc *) pool;

        vptr->rd_pool_dma = pool_dma;

        tsize = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
        vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize,
                                             &vptr->tx_bufs_dma);

        if (vptr->tx_bufs == NULL) {
                printk(KERN_ERR "%s: DMA memory allocation failed.\n",
                        vptr->dev->name);
                pci_free_consistent(vptr->pdev, psize, pool, pool_dma);
                return -ENOMEM;
        }

        memset(vptr->tx_bufs, 0, vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq);

        i = vptr->options.numrx * sizeof(struct rx_desc);
        pool += i;
        pool_dma += i;
        for (i = 0; i < vptr->num_txq; i++) {
                int offset = vptr->options.numtx * sizeof(struct tx_desc);

                vptr->td_pool_dma[i] = pool_dma;
                vptr->td_rings[i] = (struct tx_desc *) pool;
                pool += offset;
                pool_dma += offset;
        }
        return 0;
}

/**
 * velocity_free_rings - free PCI ring pointers
 * @vptr: Velocity to free from
 *
 * Clean up the PCI ring buffers allocated to this velocity.
 */

static void velocity_free_rings(struct velocity_info *vptr)
{
        int size;

        size = vptr->options.numrx * sizeof(struct rx_desc) +
               vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;

        pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);

        size = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;

        pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma);
}

static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int avail, dirty, unusable;

        /*
         * RD number must be equal to 4X per hardware spec
         * (programming guide rev 1.20, p.13)
         */
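        /*
         * Worked example: with rd_filled == 10, unusable == 10 & 3 == 2,
         * so 8 descriptors are returned to NIC ownership and reported in
         * RBRDU; the remaining 2 stay back until at least 4 have
         * accumulated again.
         */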
        if (vptr->rd_filled < 4)
                return;

        wmb();

        unusable = vptr->rd_filled & 0x0003;
        dirty = vptr->rd_dirty - unusable;
        for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
                dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
                vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
        }

        writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
        vptr->rd_filled = unusable;
}

static int velocity_rx_refill(struct velocity_info *vptr)
{
        int dirty = vptr->rd_dirty, done = 0, ret = 0;

        do {
                struct rx_desc *rd = vptr->rd_ring + dirty;

                /* Fine for an all zero Rx desc at init time as well */
                if (rd->rdesc0.owner == OWNED_BY_NIC)
                        break;

                if (!vptr->rd_info[dirty].skb) {
                        ret = velocity_alloc_rx_buf(vptr, dirty);
                        if (ret < 0)
                                break;
                }
                done++;
                dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
        } while (dirty != vptr->rd_curr);

        if (done) {
                vptr->rd_dirty = dirty;
                vptr->rd_filled += done;
                velocity_give_many_rx_descs(vptr);
        }

        return ret;
}

/**
 * velocity_init_rd_ring - set up receive ring
 * @vptr: velocity to configure
 *
 * Allocate and set up the receive buffers for each ring slot and
 * assign them to the network adapter.
 */

static int velocity_init_rd_ring(struct velocity_info *vptr)
{
        int ret;

        vptr->rd_info = kcalloc(vptr->options.numrx,
                                sizeof(struct velocity_rd_info), GFP_KERNEL);
        if (!vptr->rd_info)
                return -ENOMEM;

        vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;

        ret = velocity_rx_refill(vptr);
        if (ret < 0) {
                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
                             "%s: failed to allocate RX buffer.\n", vptr->dev->name);
                velocity_free_rd_ring(vptr);
        }

        return ret;
}

/**
 * velocity_free_rd_ring - free receive ring
 * @vptr: velocity to clean up
 *
 * Free the receive buffers for each ring slot and any
 * attached socket buffers that need to go away.
 */

static void velocity_free_rd_ring(struct velocity_info *vptr)
{
        int i;

        if (vptr->rd_info == NULL)
                return;

        for (i = 0; i < vptr->options.numrx; i++) {
                struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);
                struct rx_desc *rd = vptr->rd_ring + i;

                memset(rd, 0, sizeof(*rd));

                if (!rd_info->skb)
                        continue;
                pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
                                 PCI_DMA_FROMDEVICE);
                rd_info->skb_dma = (dma_addr_t) NULL;

                dev_kfree_skb(rd_info->skb);
                rd_info->skb = NULL;
        }

        kfree(vptr->rd_info);
        vptr->rd_info = NULL;
}

/**
 * velocity_init_td_ring - set up transmit ring
 * @vptr: velocity
 *
 * Set up the transmit ring and chain the ring pointers together.
 * Returns zero on success or a negative posix errno code for
 * failure.
 */

static int velocity_init_td_ring(struct velocity_info *vptr)
{
        int i, j;
        dma_addr_t curr;
        struct tx_desc *td;
        struct velocity_td_info *td_info;

        /* Init the TD ring entries */
        for (j = 0; j < vptr->num_txq; j++) {
                curr = vptr->td_pool_dma[j];

                vptr->td_infos[j] = kcalloc(vptr->options.numtx,
                                            sizeof(struct velocity_td_info),
                                            GFP_KERNEL);
                if (!vptr->td_infos[j]) {
                        while(--j >= 0)
                                kfree(vptr->td_infos[j]);
                        return -ENOMEM;
                }

                for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) {
                        td = &(vptr->td_rings[j][i]);
                        td_info = &(vptr->td_infos[j][i]);
                        td_info->buf = vptr->tx_bufs +
                                (j * vptr->options.numtx + i) * PKT_BUF_SZ;
                        td_info->buf_dma = vptr->tx_bufs_dma +
                                (j * vptr->options.numtx + i) * PKT_BUF_SZ;
                }
                vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
        }
        return 0;
}

/*
 * FIXME: could we merge this with velocity_free_tx_buf ?
 */

static void velocity_free_td_ring_entry(struct velocity_info *vptr,
                                        int q, int n)
{
        struct velocity_td_info * td_info = &(vptr->td_infos[q][n]);
        int i;

        if (td_info == NULL)
                return;

        if (td_info->skb) {
                for (i = 0; i < td_info->nskb_dma; i++) {
                        if (td_info->skb_dma[i]) {
                                pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
                                                 td_info->skb->len, PCI_DMA_TODEVICE);
                                td_info->skb_dma[i] = (dma_addr_t) NULL;
                        }
                }
                dev_kfree_skb(td_info->skb);
                td_info->skb = NULL;
        }
}

/**
 * velocity_free_td_ring - free td ring
 * @vptr: velocity
 *
 * Free up the transmit ring for this particular velocity adapter.
 * We free the ring contents but not the ring itself.
 */

static void velocity_free_td_ring(struct velocity_info *vptr)
{
        int i, j;

        for (j = 0; j < vptr->num_txq; j++) {
                if (vptr->td_infos[j] == NULL)
                        continue;
                for (i = 0; i < vptr->options.numtx; i++) {
                        velocity_free_td_ring_entry(vptr, j, i);
                }
                kfree(vptr->td_infos[j]);
                vptr->td_infos[j] = NULL;
        }
}

/**
 * velocity_rx_srv - service RX interrupt
 * @vptr: velocity
 * @status: adapter status (unused)
 *
 * Walk the receive ring of the velocity adapter and remove
 * any received packets from the receive queue. Hand the ring
 * slots back to the adapter for reuse.
 */

static int velocity_rx_srv(struct velocity_info *vptr, int status)
{
        struct net_device_stats *stats = &vptr->stats;
        int rd_curr = vptr->rd_curr;
        int works = 0;

        do {
                struct rx_desc *rd = vptr->rd_ring + rd_curr;

                if (!vptr->rd_info[rd_curr].skb)
                        break;

                if (rd->rdesc0.owner == OWNED_BY_NIC)
                        break;

                rmb();

                /*
                 * Don't drop frames with CE or RL errors even though
                 * RXOK is off for them
                 */
                if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
                        if (velocity_receive_frame(vptr, rd_curr) < 0)
                                stats->rx_dropped++;
                } else {
                        if (rd->rdesc0.RSR & RSR_CRC)
                                stats->rx_crc_errors++;
                        if (rd->rdesc0.RSR & RSR_FAE)
                                stats->rx_frame_errors++;

                        stats->rx_dropped++;
                }

                rd->inten = 1;

                vptr->dev->last_rx = jiffies;

                rd_curr++;
                if (rd_curr >= vptr->options.numrx)
                        rd_curr = 0;
        } while (++works <= 15);

        vptr->rd_curr = rd_curr;

        if (works > 0 && velocity_rx_refill(vptr) < 0) {
                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
                             "%s: rx buf allocation failure\n", vptr->dev->name);
        }

        VAR_USED(stats);
        return works;
}

/**
 * velocity_rx_csum - checksum process
 * @rd: receive packet descriptor
 * @skb: network layer packet buffer
 *
 * Process the status bits for the received packet and determine
 * if the checksum was computed and verified by the hardware
 */

static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
{
        skb->ip_summed = CHECKSUM_NONE;

        if (rd->rdesc1.CSM & CSM_IPKT) {
                if (rd->rdesc1.CSM & CSM_IPOK) {
                        if ((rd->rdesc1.CSM & CSM_TCPKT) ||
                            (rd->rdesc1.CSM & CSM_UDPKT)) {
                                if (!(rd->rdesc1.CSM & CSM_TUPOK)) {
                                        return;
                                }
                        }
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                }
        }
}

/**
 * velocity_rx_copy - in place Rx copy for small packets
 * @rx_skb: network layer packet buffer candidate
 * @pkt_size: received data size
 * @vptr: velocity we are handling
 *
 * Replace the current skb that is scheduled for Rx processing by a
 * shorter, immediately allocated skb, if the received packet is small
 * enough. This function returns a negative value if the received
 * packet is too big or if memory is exhausted.
 */
static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
                                   struct velocity_info *vptr)
{
        int ret = -1;

        if (pkt_size < rx_copybreak) {
                struct sk_buff *new_skb;

                new_skb = dev_alloc_skb(pkt_size + 2);
                if (new_skb) {
                        new_skb->dev = vptr->dev;
                        new_skb->ip_summed = rx_skb[0]->ip_summed;

                        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
                                skb_reserve(new_skb, 2);

                        skb_copy_from_linear_data(rx_skb[0], new_skb->data,
                                                  pkt_size);
                        *rx_skb = new_skb;
                        ret = 0;
                }

        }
        return ret;
}

/**
 * velocity_iph_realign - IP header alignment
 * @vptr: velocity we are handling
 * @skb: network layer packet buffer
 * @pkt_size: received data size
 *
 * Align the IP header on a 2 byte boundary. This behavior can be
 * configured by the user.
 */
static inline void velocity_iph_realign(struct velocity_info *vptr,
                                        struct sk_buff *skb, int pkt_size)
{
        /* FIXME - memmove ? */
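        /*
         * The byte-by-byte loop below is equivalent to
         * memmove(skb->data + 2, skb->data, pkt_size + 1); it runs from
         * the end because the source and destination regions overlap.
         */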
        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
                int i;

                for (i = pkt_size; i >= 0; i--)
                        *(skb->data + i + 2) = *(skb->data + i);
                skb_reserve(skb, 2);
        }
}

/**
 * velocity_receive_frame - received packet processor
 * @vptr: velocity we are handling
 * @idx: ring index
 *
 * A packet has arrived. We process the packet and if appropriate
 * pass the frame up the network stack
 */

static int velocity_receive_frame(struct velocity_info *vptr, int idx)
{
        void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
        struct net_device_stats *stats = &vptr->stats;
        struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
        struct rx_desc *rd = &(vptr->rd_ring[idx]);
        int pkt_len = rd->rdesc0.len;
        struct sk_buff *skb;

        if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
                VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
                stats->rx_length_errors++;
                return -EINVAL;
        }

        if (rd->rdesc0.RSR & RSR_MAR)
                vptr->stats.multicast++;

        skb = rd_info->skb;

        pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
                                    vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);

        /*
         * Drop frames not meeting IEEE 802.3
         */

        if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
                if (rd->rdesc0.RSR & RSR_RL) {
                        stats->rx_length_errors++;
                        return -EINVAL;
                }
        }

        pci_action = pci_dma_sync_single_for_device;

        velocity_rx_csum(rd, skb);

        if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
                velocity_iph_realign(vptr, skb, pkt_len);
                pci_action = pci_unmap_single;
                rd_info->skb = NULL;
        }

        pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
                   PCI_DMA_FROMDEVICE);

        skb_put(skb, pkt_len - 4);
        skb->protocol = eth_type_trans(skb, vptr->dev);

        stats->rx_bytes += pkt_len;
        netif_rx(skb);

        return 0;
}

/**
 * velocity_alloc_rx_buf - allocate aligned receive buffer
 * @vptr: velocity
 * @idx: ring index
 *
 * Allocate a new full sized buffer for the reception of a frame and
 * map it into PCI space for the hardware to use. The hardware
 * requires *64* byte alignment of the buffer which makes life
 * less fun than would be ideal.
 */

static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
{
        struct rx_desc *rd = &(vptr->rd_ring[idx]);
        struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);

        rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
        if (rd_info->skb == NULL)
                return -ENOMEM;

        /*
         * Do the gymnastics to get the buffer head for data at
         * 64byte alignment.
         */
        skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
        rd_info->skb->dev = vptr->dev;
        rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);

        /*
         * Fill in the descriptor to match
         */

        *((u32 *) & (rd->rdesc0)) = 0;
        rd->len = cpu_to_le32(vptr->rx_buf_sz);
        rd->inten = 1;
        rd->pa_low = cpu_to_le32(rd_info->skb_dma);
        rd->pa_high = 0;
        return 0;
}
1643
1644/**
1645 * velocity_tx_srv - transmit interrupt service
1646 * @vptr: velocity
1647 * @status: interrupt status bits (currently unused)
1648 *
1649 * Scan the queues looking for transmitted packets that
1650 * we can complete and clean up. Update any statistics as
Robert P. J. Day3a4fa0a2007-10-19 23:10:43 +02001651 * necessary.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001653
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
1655{
1656 struct tx_desc *td;
1657 int qnum;
1658 int full = 0;
1659 int idx;
1660 int works = 0;
1661 struct velocity_td_info *tdinfo;
1662 struct net_device_stats *stats = &vptr->stats;
1663
1664 for (qnum = 0; qnum < vptr->num_txq; qnum++) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001665 for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 idx = (idx + 1) % vptr->options.numtx) {
1667
1668 /*
1669 * Get Tx Descriptor
1670 */
1671 td = &(vptr->td_rings[qnum][idx]);
1672 tdinfo = &(vptr->td_infos[qnum][idx]);
1673
1674 if (td->tdesc0.owner == OWNED_BY_NIC)
1675 break;
1676
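			/* Bound the work done per call; the interrupt
			   handler decides when the total work is
			   excessive. */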
1677 if (works++ > 15)
1678 break;
1679
1680 if (td->tdesc0.TSR & TSR0_TERR) {
1681 stats->tx_errors++;
1682 stats->tx_dropped++;
1683 if (td->tdesc0.TSR & TSR0_CDH)
1684 stats->tx_heartbeat_errors++;
1685 if (td->tdesc0.TSR & TSR0_CRS)
1686 stats->tx_carrier_errors++;
1687 if (td->tdesc0.TSR & TSR0_ABT)
1688 stats->tx_aborted_errors++;
1689 if (td->tdesc0.TSR & TSR0_OWC)
1690 stats->tx_window_errors++;
1691 } else {
1692 stats->tx_packets++;
1693 stats->tx_bytes += tdinfo->skb->len;
1694 }
1695 velocity_free_tx_buf(vptr, tdinfo);
1696 vptr->td_used[qnum]--;
1697 }
1698 vptr->td_tail[qnum] = idx;
1699
1700 if (AVAIL_TD(vptr, qnum) < 1) {
1701 full = 1;
1702 }
1703 }
1704 /*
1705 * Look to see if we should kick the transmit network
1706 * layer for more work.
1707 */
1708 if (netif_queue_stopped(vptr->dev) && (full == 0)
1709 && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1710 netif_wake_queue(vptr->dev);
1711 }
1712 return works;
1713}
1714
1715/**
1716 * velocity_print_link_status - link status reporting
1717 * @vptr: velocity to report on
1718 *
1719 * Turn the link status of the velocity card into a kernel log
1720 * description of the new link state, detailing speed and duplex
1721 * status
1722 */
1723
1724static void velocity_print_link_status(struct velocity_info *vptr)
1725{
1726
1727 if (vptr->mii_status & VELOCITY_LINK_FAIL) {
1728 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
1729 } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
Dave Jonesb4fea612007-06-06 03:07:52 -04001730 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
1732 if (vptr->mii_status & VELOCITY_SPEED_1000)
1733 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
1734 else if (vptr->mii_status & VELOCITY_SPEED_100)
1735 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
1736 else
1737 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");
1738
1739 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1740 VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
1741 else
1742 VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
1743 } else {
1744 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
1745 switch (vptr->options.spd_dpx) {
1746 case SPD_DPX_100_HALF:
1747 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
1748 break;
1749 case SPD_DPX_100_FULL:
1750 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
1751 break;
1752 case SPD_DPX_10_HALF:
1753 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
1754 break;
1755 case SPD_DPX_10_FULL:
1756 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
1757 break;
1758 default:
1759 break;
1760 }
1761 }
1762}
1763
1764/**
1765 * velocity_error - handle error from controller
1766 * @vptr: velocity
1767 * @status: card status
1768 *
1769 * Process an error report from the hardware and attempt to recover
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001770 * the card itself. At the moment we cannot recover from some
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 * theoretically impossible errors but this could be fixed using
1772 * the pci_device_failed logic to bounce the hardware
1773 *
1774 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001775
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776static void velocity_error(struct velocity_info *vptr, int status)
1777{
1778
1779 if (status & ISR_TXSTLI) {
1780 struct mac_regs __iomem * regs = vptr->mac_regs;
1781
Eddy L O Jansson0e6ff152007-07-31 00:38:53 -07001782 printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1784 writew(TRDCSR_RUN, &regs->TDCSRClr);
1785 netif_stop_queue(vptr->dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001786
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 /* FIXME: port over the pci_device_failed code and use it
1788 here */
1789 }
1790
1791 if (status & ISR_SRCI) {
1792 struct mac_regs __iomem * regs = vptr->mac_regs;
1793 int linked;
1794
1795 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1796 vptr->mii_status = check_connection_type(regs);
1797
1798 /*
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001799 * If it is a 3119, disable frame bursting in
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800 * halfduplex mode and enable it in fullduplex
1801 * mode
1802 */
1803 if (vptr->rev_id < REV_ID_VT3216_A0) {
1804 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1805 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1806 else
1807 BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1808 }
1809 /*
1810 * Only enable CD heart beat counter in 10HD mode
1811 */
1812 if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) {
1813 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1814 } else {
1815 BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1816 }
1817 }
1818 /*
1819 * Get link status from PHYSR0
1820 */
1821 linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1822
1823 if (linked) {
1824 vptr->mii_status &= ~VELOCITY_LINK_FAIL;
Francois Romieu8a22ddd2006-06-23 00:47:06 +02001825 netif_carrier_on(vptr->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 } else {
1827 vptr->mii_status |= VELOCITY_LINK_FAIL;
Francois Romieu8a22ddd2006-06-23 00:47:06 +02001828 netif_carrier_off(vptr->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 }
1830
1831 velocity_print_link_status(vptr);
1832 enable_flow_control_ability(vptr);
1833
1834 /*
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001835 * Re-enable auto-polling because SRCI will disable
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 * auto-polling
1837 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001838
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 enable_mii_autopoll(regs);
1840
1841 if (vptr->mii_status & VELOCITY_LINK_FAIL)
1842 netif_stop_queue(vptr->dev);
1843 else
1844 netif_wake_queue(vptr->dev);
1845
1846 }
1847 if (status & ISR_MIBFI)
1848 velocity_update_hw_mibs(vptr);
1849 if (status & ISR_LSTEI)
1850 mac_rx_queue_wake(vptr->mac_regs);
1851}
1852
1853/**
1854 * velocity_free_tx_buf - free transmit buffer
1855 * @vptr: velocity
1856 * @tdinfo: buffer
1857 *
1858 * Release a transmit buffer. If the buffer was preallocated then
1859 * recycle it, if not then unmap the buffer.
1860 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001861
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
1863{
1864 struct sk_buff *skb = tdinfo->skb;
1865 int i;
1866
1867 /*
1868 * Don't unmap the pre-allocated tx_bufs
1869 */
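	/* skb_dma[0] == buf_dma identifies the pre-allocated bounce
	   buffer used for padded short frames; it is not unmapped
	   here. */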
1870 if (tdinfo->skb_dma && (tdinfo->skb_dma[0] != tdinfo->buf_dma)) {
1871
1872 for (i = 0; i < tdinfo->nskb_dma; i++) {
1873#ifdef VELOCITY_ZERO_COPY_SUPPORT
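			/* FIXME: 'td' is not in scope here; the zero copy
			   path does not currently build */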
1874 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE);
1875#else
1876 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE);
1877#endif
1878 tdinfo->skb_dma[i] = 0;
1879 }
1880 }
1881 dev_kfree_skb_irq(skb);
1882 tdinfo->skb = NULL;
1883}
1884
1885/**
1886 * velocity_open - interface activation callback
1887 * @dev: network layer device to open
1888 *
1889 * Called when the network layer brings the interface up. Returns
1890 * a negative posix error code on failure, or zero on success.
1891 *
1892 * All the ring allocation and set up is done on open for this
1893 * adapter to minimise memory usage when inactive
1894 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001895
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896static int velocity_open(struct net_device *dev)
1897{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04001898 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 int ret;
1900
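	/* A standard MTU fits the default PKT_BUF_SZ buffer; larger
	   MTUs get the MTU plus 32 bytes of slack. */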
1901 vptr->rx_buf_sz = (dev->mtu <= 1504 ? PKT_BUF_SZ : dev->mtu + 32);
1902
1903 ret = velocity_init_rings(vptr);
1904 if (ret < 0)
1905 goto out;
1906
1907 ret = velocity_init_rd_ring(vptr);
1908 if (ret < 0)
1909 goto err_free_desc_rings;
1910
1911 ret = velocity_init_td_ring(vptr);
1912 if (ret < 0)
1913 goto err_free_rd_ring;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001914
1915 /* Ensure chip is running */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 pci_set_power_state(vptr->pdev, PCI_D0);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001917
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
1919
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07001920 ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 dev->name, dev);
1922 if (ret < 0) {
1923 /* Power down the chip */
1924 pci_set_power_state(vptr->pdev, PCI_D3hot);
1925 goto err_free_td_ring;
1926 }
1927
1928 mac_enable_int(vptr->mac_regs);
1929 netif_start_queue(dev);
1930 vptr->flags |= VELOCITY_FLAGS_OPENED;
1931out:
1932 return ret;
1933
1934err_free_td_ring:
1935 velocity_free_td_ring(vptr);
1936err_free_rd_ring:
1937 velocity_free_rd_ring(vptr);
1938err_free_desc_rings:
1939 velocity_free_rings(vptr);
1940 goto out;
1941}
1942
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001943/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 * velocity_change_mtu - MTU change callback
1945 * @dev: network device
1946 * @new_mtu: desired MTU
1947 *
1948 * Handle requests from the networking layer for MTU change on
1949 * this interface. It gets called on a change by the network layer.
1950 * Return zero for success or negative posix error code.
1951 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001952
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953static int velocity_change_mtu(struct net_device *dev, int new_mtu)
1954{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04001955 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 unsigned long flags;
1957 int oldmtu = dev->mtu;
1958 int ret = 0;
1959
1960 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001961 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 vptr->dev->name);
1963 return -EINVAL;
1964 }
1965
Stephen Hemmingerbd7b3f32007-11-14 19:47:27 -08001966 if (!netif_running(dev)) {
1967 dev->mtu = new_mtu;
1968 return 0;
1969 }
1970
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 if (new_mtu != oldmtu) {
1972 spin_lock_irqsave(&vptr->lock, flags);
1973
1974 netif_stop_queue(dev);
1975 velocity_shutdown(vptr);
1976
1977 velocity_free_td_ring(vptr);
1978 velocity_free_rd_ring(vptr);
1979
1980 dev->mtu = new_mtu;
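		/* Bucket the receive buffer size: 4K for MTUs up to
		   4096 bytes, 8K up to 8192 bytes, 9K for full jumbo
		   frames. */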
1981 if (new_mtu > 8192)
1982 vptr->rx_buf_sz = 9 * 1024;
1983 else if (new_mtu > 4096)
1984 vptr->rx_buf_sz = 8192;
1985 else
1986 vptr->rx_buf_sz = 4 * 1024;
1987
1988 ret = velocity_init_rd_ring(vptr);
1989 if (ret < 0)
1990 goto out_unlock;
1991
1992 ret = velocity_init_td_ring(vptr);
1993 if (ret < 0)
1994 goto out_unlock;
1995
1996 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
1997
1998 mac_enable_int(vptr->mac_regs);
1999 netif_start_queue(dev);
2000out_unlock:
2001 spin_unlock_irqrestore(&vptr->lock, flags);
2002 }
2003
2004 return ret;
2005}
2006
2007/**
2008 * velocity_shutdown - shut down the chip
2009 * @vptr: velocity to deactivate
2010 *
2011 * Shuts down the internal operations of the velocity and
2012 * disables interrupts, autopolling, transmit and receive
2013 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002014
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015static void velocity_shutdown(struct velocity_info *vptr)
2016{
2017 struct mac_regs __iomem * regs = vptr->mac_regs;
2018 mac_disable_int(regs);
2019 writel(CR0_STOP, &regs->CR0Set);
2020 writew(0xFFFF, &regs->TDCSRClr);
2021 writeb(0xFF, &regs->RDCSRClr);
2022 safe_disable_mii_autopoll(regs);
2023 mac_clear_isr(regs);
2024}
2025
2026/**
2027 * velocity_close - close adapter callback
2028 * @dev: network device
2029 *
2030 * Callback from the network layer when the velocity is being
2031 * deactivated.
2032 */
2033
2034static int velocity_close(struct net_device *dev)
2035{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04002036 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037
2038 netif_stop_queue(dev);
2039 velocity_shutdown(vptr);
2040
2041 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2042 velocity_get_ip(vptr);
2043 if (dev->irq != 0)
2044 free_irq(dev->irq, dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002045
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 /* Power down the chip */
2047 pci_set_power_state(vptr->pdev, PCI_D3hot);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002048
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 /* Free the resources */
2050 velocity_free_td_ring(vptr);
2051 velocity_free_rd_ring(vptr);
2052 velocity_free_rings(vptr);
2053
2054 vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2055 return 0;
2056}
2057
2058/**
2059 * velocity_xmit - transmit packet callback
2060 * @skb: buffer to transmit
2061 * @dev: network device
2062 *
2063 * Called by the network layer to request that a packet be queued to
2064 * the velocity. Returns zero on success.
2065 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002066
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2068{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04002069 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 int qnum = 0;
2071 struct tx_desc *td_ptr;
2072 struct velocity_td_info *tdinfo;
2073 unsigned long flags;
2074 int index;
2075
2076 int pktlen = skb->len;
2077
Herbert Xu364c6ba2006-06-09 16:10:40 -07002078#ifdef VELOCITY_ZERO_COPY_SUPPORT
2079 if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2080 kfree_skb(skb);
2081 return 0;
2082 }
2083#endif
2084
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 spin_lock_irqsave(&vptr->lock, flags);
2086
2087 index = vptr->td_curr[qnum];
2088 td_ptr = &(vptr->td_rings[qnum][index]);
2089 tdinfo = &(vptr->td_infos[qnum][index]);
2090
2091 td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
2092 td_ptr->tdesc1.TCR = TCR0_TIC;
2093 td_ptr->td_buf[0].queue = 0;
2094
2095 /*
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002096 * Pad short frames.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 */
2098 if (pktlen < ETH_ZLEN) {
2099 /* Cannot occur until ZC support */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 pktlen = ETH_ZLEN;
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002101 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
2103 tdinfo->skb = skb;
2104 tdinfo->skb_dma[0] = tdinfo->buf_dma;
2105 td_ptr->tdesc0.pktsize = pktlen;
2106 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2107 td_ptr->td_buf[0].pa_high = 0;
2108 td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
2109 tdinfo->nskb_dma = 1;
2110 td_ptr->tdesc1.CMDZ = 2;
2111 } else
2112#ifdef VELOCITY_ZERO_COPY_SUPPORT
2113 if (skb_shinfo(skb)->nr_frags > 0) {
2114 int nfrags = skb_shinfo(skb)->nr_frags;
2115 tdinfo->skb = skb;
2116 if (nfrags > 6) {
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002117 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118 tdinfo->skb_dma[0] = tdinfo->buf_dma;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002119 td_ptr->tdesc0.pktsize = pktlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2121 td_ptr->td_buf[0].pa_high = 0;
2122 td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
2123 tdinfo->nskb_dma = 1;
2124 td_ptr->tdesc1.CMDZ = 2;
2125 } else {
2126 int i = 0;
2127 tdinfo->nskb_dma = 0;
2128 tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE);
2129
2130 td_ptr->tdesc0.pktsize = pktlen;
2131
2132 /* FIXME: support 48bit DMA later */
2133 td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma[i]);
2134 td_ptr->td_buf[i].pa_high = 0;
2135 td_ptr->td_buf[i].bufsize = skb->len - skb->data_len;
2136
2137 for (i = 0; i < nfrags; i++) {
2138 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2139 void *addr = ((void *) page_address(frag->page + frag->page_offset));
2140
2141 tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
2142
2143 td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2144 td_ptr->td_buf[i + 1].pa_high = 0;
2145 td_ptr->td_buf[i + 1].bufsize = frag->size;
2146 }
2147 tdinfo->nskb_dma = i + 1;
2148 td_ptr->tdesc1.CMDZ = i;
2149 }
2150
2151 } else
2152#endif
2153 {
2154 /*
2155 * Map the linear network buffer into PCI space and
2156 * add it to the transmit ring.
2157 */
2158 tdinfo->skb = skb;
2159 tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
2160 td_ptr->tdesc0.pktsize = pktlen;
2161 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2162 td_ptr->td_buf[0].pa_high = 0;
2163 td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
2164 tdinfo->nskb_dma = 1;
2165 td_ptr->tdesc1.CMDZ = 2;
2166 }
2167
Stephen Hemminger501e4d22007-08-24 13:56:49 -07002168 if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
2169 td_ptr->tdesc1.pqinf.VID = vlan_tx_tag_get(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 td_ptr->tdesc1.pqinf.priority = 0;
2171 td_ptr->tdesc1.pqinf.CFI = 0;
2172 td_ptr->tdesc1.TCR |= TCR0_VETAG;
2173 }
2174
2175 /*
2176 * Handle hardware checksum
2177 */
2178 if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
Patrick McHardy84fa7932006-08-29 16:44:56 -07002179 && (skb->ip_summed == CHECKSUM_PARTIAL)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002180 const struct iphdr *ip = ip_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 if (ip->protocol == IPPROTO_TCP)
2182 td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2183 else if (ip->protocol == IPPROTO_UDP)
2184 td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2185 td_ptr->tdesc1.TCR |= TCR0_IPCK;
2186 }
2187 {
2188
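		/*
		 * Hand the descriptor to the NIC, then kick the queue
		 * by setting the queue bit of the previous descriptor
		 * in the ring.
		 */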
2189 int prev = index - 1;
2190
2191 if (prev < 0)
2192 prev = vptr->options.numtx - 1;
2193 td_ptr->tdesc0.owner = OWNED_BY_NIC;
2194 vptr->td_used[qnum]++;
2195 vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
2196
2197 if (AVAIL_TD(vptr, qnum) < 1)
2198 netif_stop_queue(dev);
2199
2200 td_ptr = &(vptr->td_rings[qnum][prev]);
2201 td_ptr->td_buf[0].queue = 1;
2202 mac_tx_queue_wake(vptr->mac_regs, qnum);
2203 }
2204 dev->trans_start = jiffies;
2205 spin_unlock_irqrestore(&vptr->lock, flags);
2206 return 0;
2207}
2208
2209/**
2210 * velocity_intr - interrupt callback
2211 * @irq: interrupt number
2212 * @dev_instance: interrupting device
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213 *
2214 * Called whenever an interrupt is generated by the velocity
2215 * adapter IRQ line. We may not be the source of the interrupt
2216 * and need to identify initially if we are, and if not exit as
2217 * efficiently as possible.
2218 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002219
David Howells7d12e782006-10-05 14:55:46 +01002220static int velocity_intr(int irq, void *dev_instance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221{
2222 struct net_device *dev = dev_instance;
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04002223 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 u32 isr_status;
2225 int max_count = 0;
2226
2227
2228 spin_lock(&vptr->lock);
2229 isr_status = mac_read_isr(vptr->mac_regs);
2230
2231 /* Not us ? */
2232 if (isr_status == 0) {
2233 spin_unlock(&vptr->lock);
2234 return IRQ_NONE;
2235 }
2236
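	/* Mask chip interrupts while servicing; they are re-enabled on
	   the way out. */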
2237 mac_disable_int(vptr->mac_regs);
2238
2239 /*
2240 * Keep processing the ISR until we have completed
2241 * processing and the isr_status becomes zero
2242 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002243
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 while (isr_status != 0) {
2245 mac_write_isr(vptr->mac_regs, isr_status);
2246 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2247 velocity_error(vptr, isr_status);
2248 if (isr_status & (ISR_PRXI | ISR_PPRXI))
2249 max_count += velocity_rx_srv(vptr, isr_status);
2250 if (isr_status & (ISR_PTXI | ISR_PPTXI))
2251 max_count += velocity_tx_srv(vptr, isr_status);
2252 isr_status = mac_read_isr(vptr->mac_regs);
2253 if (max_count > vptr->options.int_works)
2254 {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002255 printk(KERN_WARNING "%s: excessive work at interrupt.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 dev->name);
2257 max_count = 0;
2258 }
2259 }
2260 spin_unlock(&vptr->lock);
2261 mac_enable_int(vptr->mac_regs);
2262 return IRQ_HANDLED;
2263
2264}
2265
2266
2267/**
2268 * velocity_set_multi - filter list change callback
2269 * @dev: network device
2270 *
2271 * Called by the network layer when the filter lists need to change
2272 * for a velocity adapter. Reload the CAMs with the new address
2273 * filter ruleset.
2274 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002275
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276static void velocity_set_multi(struct net_device *dev)
2277{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04002278 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 struct mac_regs __iomem * regs = vptr->mac_regs;
2280 u8 rx_mode;
2281 int i;
2282 struct dev_mc_list *mclist;
2283
2284 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 writel(0xffffffff, &regs->MARCAM[0]);
2286 writel(0xffffffff, &regs->MARCAM[4]);
2287 rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
2288 } else if ((dev->mc_count > vptr->multicast_limit)
2289 || (dev->flags & IFF_ALLMULTI)) {
2290 writel(0xffffffff, &regs->MARCAM[0]);
2291 writel(0xffffffff, &regs->MARCAM[4]);
2292 rx_mode = (RCR_AM | RCR_AB);
2293 } else {
2294 int offset = MCAM_SIZE - vptr->multicast_limit;
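		/* Multicast entries occupy the last multicast_limit
		   slots of the CAM. */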
Stephen Hemminger01faccb2007-08-24 14:40:45 -07002295 mac_get_cam_mask(regs, vptr->mCAMmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296
2297 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
Stephen Hemminger01faccb2007-08-24 14:40:45 -07002298 mac_set_cam(regs, i + offset, mclist->dmi_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
2300 }
2301
Stephen Hemminger01faccb2007-08-24 14:40:45 -07002302 mac_set_cam_mask(regs, vptr->mCAMmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 rx_mode = (RCR_AM | RCR_AB);
2304 }
2305 if (dev->mtu > 1500)
2306 rx_mode |= RCR_AL;
2307
2308 BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
2309
2310}
2311
2312/**
2313 * velocity_get_stats - statistics callback
2314 * @dev: network device
2315 *
2316 * Callback from the network layer to allow driver statistics
2317 * to be resynchronized with hardware collected state. In the
2318 * case of the velocity we need to pull the MIB counters from
2319 * the hardware into the software counters before letting the network
2320 * layer display them.
2321 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002322
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2324{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04002325 struct velocity_info *vptr = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002326
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 /* If the hardware is down, don't touch MII */
2328 if(!netif_running(dev))
2329 return &vptr->stats;
2330
2331 spin_lock_irq(&vptr->lock);
2332 velocity_update_hw_mibs(vptr);
2333 spin_unlock_irq(&vptr->lock);
2334
2335 vptr->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2336 vptr->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2337 vptr->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2338
2339// unsigned long rx_dropped; /* no space in linux buffers */
2340 vptr->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2341 /* detailed rx_errors: */
2342// unsigned long rx_length_errors;
2343// unsigned long rx_over_errors; /* receiver ring buff overflow */
2344 vptr->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2345// unsigned long rx_frame_errors; /* recv'd frame alignment error */
2346// unsigned long rx_fifo_errors; /* recv'r fifo overrun */
2347// unsigned long rx_missed_errors; /* receiver missed packet */
2348
2349 /* detailed tx_errors */
2350// unsigned long tx_fifo_errors;
2351
2352 return &vptr->stats;
2353}
2354
2355
2356/**
2357 * velocity_ioctl - ioctl entry point
2358 * @dev: network device
2359 * @rq: interface request ioctl
2360 * @cmd: command code
2361 *
2362 * Called when the user issues an ioctl request to the network
2363 * device in question. The velocity interface supports MII.
2364 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002365
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2367{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04002368 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369 int ret;
2370
2371 /* If we are asked for information and the device is power
2372 saving then we need to bring the device back up to talk to it */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002373
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 if (!netif_running(dev))
2375 pci_set_power_state(vptr->pdev, PCI_D0);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002376
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 switch (cmd) {
2378 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2379 case SIOCGMIIREG: /* Read MII PHY register. */
2380 case SIOCSMIIREG: /* Write to MII PHY register. */
2381 ret = velocity_mii_ioctl(dev, rq, cmd);
2382 break;
2383
2384 default:
2385 ret = -EOPNOTSUPP;
2386 }
2387 if (!netif_running(dev))
2388 pci_set_power_state(vptr->pdev, PCI_D3hot);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002389
2390
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391 return ret;
2392}
2393
2394/*
2395 * Definition for our device driver. The PCI layer interface
2396 * uses this to handle all our card discovery and plugging
2397 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002398
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399static struct pci_driver velocity_driver = {
2400 .name = VELOCITY_NAME,
2401 .id_table = velocity_id_table,
2402 .probe = velocity_found1,
2403 .remove = __devexit_p(velocity_remove1),
2404#ifdef CONFIG_PM
2405 .suspend = velocity_suspend,
2406 .resume = velocity_resume,
2407#endif
2408};
2409
2410/**
2411 * velocity_init_module - load time function
2412 *
2413 * Called when the velocity module is loaded. The PCI driver
2414 * is registered with the PCI layer, and in turn will call
2415 * the probe functions for each velocity adapter installed
2416 * in the system.
2417 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002418
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419static int __init velocity_init_module(void)
2420{
2421 int ret;
2422
2423 velocity_register_notifier();
Jeff Garzik29917622006-08-19 17:48:59 -04002424 ret = pci_register_driver(&velocity_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425 if (ret < 0)
2426 velocity_unregister_notifier();
2427 return ret;
2428}
2429
2430/**
2431 * velocity_cleanup - module unload
2432 *
2433 * When the velocity hardware is unloaded this function is called.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002434 * It will clean up the notifiers and then unregister the PCI
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 * driver interface for this hardware. This in turn cleans up
2436 * all discovered interfaces before returning from the function
2437 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002438
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439static void __exit velocity_cleanup_module(void)
2440{
2441 velocity_unregister_notifier();
2442 pci_unregister_driver(&velocity_driver);
2443}
2444
2445module_init(velocity_init_module);
2446module_exit(velocity_cleanup_module);
2447
2448
2449/*
2450 * MII access and media link mode setting functions
2451 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002452
2453
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454/**
2455 * mii_init - set up MII
2456 * @vptr: velocity adapter
2457 * @mii_status: link status
2458 *
2459 * Set up the PHY for the current link state.
2460 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002461
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462static void mii_init(struct velocity_info *vptr, u32 mii_status)
2463{
2464 u16 BMCR;
2465
2466 switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
2467 case PHYID_CICADA_CS8201:
2468 /*
2469 * Reset to hardware default
2470 */
2471 MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
2472 /*
2473 * Turn on ECHODIS bit in NWay-forced full mode and turn it
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002474 * off in NWay-forced half mode, for the NWay-forced vs.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475 * legacy-forced issue.
2476 */
2477 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
2478 MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2479 else
2480 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2481 /*
2482 * Turn on Link/Activity LED enable bit for CIS8201
2483 */
2484 MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
2485 break;
2486 case PHYID_VT3216_32BIT:
2487 case PHYID_VT3216_64BIT:
2488 /*
2489 * Reset to hardware default
2490 */
2491 MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
2492 /*
2493 * Turn on ECHODIS bit in NWay-forced full mode and turn it
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002494 * off in NWay-forced half mode, for the NWay-forced vs.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495 * legacy-forced issue
2496 */
2497 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
2498 MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2499 else
2500 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2501 break;
2502
2503 case PHYID_MARVELL_1000:
2504 case PHYID_MARVELL_1000S:
2505 /*
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002506 * Assert CRS on Transmit
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507 */
2508 MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
2509 /*
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002510 * Reset to hardware default
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511 */
2512 MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
2513 break;
2514 default:
2515 ;
2516 }
2517 velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
2518 if (BMCR & BMCR_ISO) {
2519 BMCR &= ~BMCR_ISO;
2520 velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
2521 }
2522}
2523
2524/**
2525 * safe_disable_mii_autopoll - autopoll off
2526 * @regs: velocity registers
2527 *
2528 * Turn off the autopoll and wait for it to disable on the chip
2529 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002530
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531static void safe_disable_mii_autopoll(struct mac_regs __iomem * regs)
2532{
2533 u16 ww;
2534
2535 /* turn off MAUTO */
2536 writeb(0, &regs->MIICR);
2537 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
2538 udelay(1);
2539 if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
2540 break;
2541 }
2542}
2543
2544/**
2545 * enable_mii_autopoll - turn on autopolling
2546 * @regs: velocity registers
2547 *
2548 * Enable the MII link status autopoll feature on the Velocity
2549 * hardware. Wait for it to enable.
2550 */
2551
2552static void enable_mii_autopoll(struct mac_regs __iomem * regs)
2553{
2554 int ii;
2555
2556 writeb(0, &(regs->MIICR));
2557 writeb(MIIADR_SWMPL, &regs->MIIADR);
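	/* Wait for the MII interface to go idle, enable MAUTO, then
	   wait below for autopolling to actually start (MIDLE
	   clears). */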
2558
2559 for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
2560 udelay(1);
2561 if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
2562 break;
2563 }
2564
2565 writeb(MIICR_MAUTO, &regs->MIICR);
2566
2567 for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
2568 udelay(1);
2569 if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
2570 break;
2571 }
2572
2573}
2574
2575/**
2576 * velocity_mii_read - read MII data
2577 * @regs: velocity registers
2578 * @index: MII register index
2579 * @data: buffer for received data
2580 *
2581 * Perform a single read of an MII 16bit register. Returns zero
2582 * on success or -ETIMEDOUT if the PHY did not respond.
2583 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002584
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
2586{
2587 u16 ww;
2588
2589 /*
2590 * Disable MIICR_MAUTO, so that mii addr can be set normally
2591 */
2592 safe_disable_mii_autopoll(regs);
2593
2594 writeb(index, &regs->MIIADR);
2595
2596 BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
2597
2598 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
2599 if (!(readb(&regs->MIICR) & MIICR_RCMD))
2600 break;
2601 }
2602
2603 *data = readw(&regs->MIIDATA);
2604
2605 enable_mii_autopoll(regs);
2606 if (ww == W_MAX_TIMEOUT)
2607 return -ETIMEDOUT;
2608 return 0;
2609}
2610
2611/**
2612 * velocity_mii_write - write MII data
2613 * @regs: velocity registers
2614 * @mii_addr: MII register index
2615 * @data: 16bit data for the MII register
2616 *
2617 * Perform a single write to an MII 16bit register. Returns zero
2618 * on success or -ETIMEDOUT if the PHY did not respond.
2619 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002620
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
2622{
2623 u16 ww;
2624
2625 /*
2626 * Disable MIICR_MAUTO, so that mii addr can be set normally
2627 */
2628 safe_disable_mii_autopoll(regs);
2629
2630 /* MII reg offset */
2631 writeb(mii_addr, &regs->MIIADR);
2632 /* set MII data */
2633 writew(data, &regs->MIIDATA);
2634
2635 /* turn on MIICR_WCMD */
2636 BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
2637
2638 /* W_MAX_TIMEOUT is the timeout period */
2639 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
2640 udelay(5);
2641 if (!(readb(&regs->MIICR) & MIICR_WCMD))
2642 break;
2643 }
2644 enable_mii_autopoll(regs);
2645
2646 if (ww == W_MAX_TIMEOUT)
2647 return -ETIMEDOUT;
2648 return 0;
2649}
2650
2651/**
2652 * velocity_get_opt_media_mode - get media selection
2653 * @vptr: velocity adapter
2654 *
2655 * Get the media mode stored in EEPROM or module options and load
2656 * mii_status accordingly. The requested link state information
2657 * is also returned.
2658 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002659
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
2661{
2662 u32 status = 0;
2663
2664 switch (vptr->options.spd_dpx) {
2665 case SPD_DPX_AUTO:
2666 status = VELOCITY_AUTONEG_ENABLE;
2667 break;
2668 case SPD_DPX_100_FULL:
2669 status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
2670 break;
2671 case SPD_DPX_10_FULL:
2672 status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
2673 break;
2674 case SPD_DPX_100_HALF:
2675 status = VELOCITY_SPEED_100;
2676 break;
2677 case SPD_DPX_10_HALF:
2678 status = VELOCITY_SPEED_10;
2679 break;
2680 }
2681 vptr->mii_status = status;
2682 return status;
2683}
2684
2685/**
2686 * mii_set_auto_on - autonegotiate on
2687 * @vptr: velocity
2688 *
2689 * Enable autonegotiation on this interface
2690 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002691
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692static void mii_set_auto_on(struct velocity_info *vptr)
2693{
2694 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
2695 MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
2696 else
2697 MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
2698}
2699
2700
2701/*
2702static void mii_set_auto_off(struct velocity_info * vptr)
2703{
2704 MII_REG_BITS_OFF(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
2705}
2706*/
2707
2708/**
2709 * set_mii_flow_control - flow control setup
2710 * @vptr: velocity interface
2711 *
2712 * Set up the flow control on this interface according to
2713 * the supplied user/eeprom options.
2714 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002715
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716static void set_mii_flow_control(struct velocity_info *vptr)
2717{
2718 /*Enable or Disable PAUSE in ANAR */
2719 switch (vptr->options.flow_cntl) {
2720 case FLOW_CNTL_TX:
2721 MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
2722 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
2723 break;
2724
2725 case FLOW_CNTL_RX:
2726 MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
2727 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
2728 break;
2729
2730 case FLOW_CNTL_TX_RX:
2731 MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
2732 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
2733 break;
2734
2735 case FLOW_CNTL_DISABLE:
2736 MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
2737 MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
2738 break;
2739 default:
2740 break;
2741 }
2742}
2743
2744/**
2745 * velocity_set_media_mode - set media mode
 * @vptr: velocity adapter
2746 * @mii_status: requested link state
2747 *
2748 * Check the media link state and configure the flow control,
2749 * PHY and velocity hardware setup accordingly. In particular
2750 * we need to set up CD polling and frame bursting.
2751 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002752
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
2754{
2755 u32 curr_status;
2756 struct mac_regs __iomem * regs = vptr->mac_regs;
2757
2758 vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
2759 curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
2760
2761 /* Set mii link status */
2762 set_mii_flow_control(vptr);
2763
2764 /*
2765 Check if the new status is consistent with the current status
2766 if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE)
2767 || (mii_status==curr_status)) {
2768 vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
2769 vptr->mii_status=check_connection_type(vptr->mac_regs);
2770 VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
2771 return 0;
2772 }
2773 */
2774
2775 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) {
2776 MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
2777 }
2778
2779 /*
2780 * If connection type is AUTO
2781 */
2782 if (mii_status & VELOCITY_AUTONEG_ENABLE) {
2783 VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
2784 /* clear force MAC mode bit */
2785 BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
2786 /* set duplex mode of MAC according to duplex mode of MII */
2787 MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
2788 MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
2789 MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);
2790
2791 /* enable AUTO-NEGO mode */
2792 mii_set_auto_on(vptr);
2793 } else {
2794 u16 ANAR;
2795 u8 CHIPGCR;
2796
2797 /*
2798 * 1. if it's 3119, disable frame bursting in halfduplex mode
2799 * and enable it in fullduplex mode
2800 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
2801 * 3. only enable CD heart beat counter in 10HD mode
2802 */
2803
2804 /* set force MAC mode bit */
2805 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2806
2807 CHIPGCR = readb(&regs->CHIPGCR);
2808 CHIPGCR &= ~CHIPGCR_FCGMII;
2809
2810 if (mii_status & VELOCITY_DUPLEX_FULL) {
2811 CHIPGCR |= CHIPGCR_FCFDX;
2812 writeb(CHIPGCR, &regs->CHIPGCR);
2813 VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
2814 if (vptr->rev_id < REV_ID_VT3216_A0)
2815 BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
2816 } else {
2817 CHIPGCR &= ~CHIPGCR_FCFDX;
2818 VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
2819 writeb(CHIPGCR, &regs->CHIPGCR);
2820 if (vptr->rev_id < REV_ID_VT3216_A0)
2821 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
2822 }
2823
2824 MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
2825
2826 if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) {
2827 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
2828 } else {
2829 BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
2830 }
2831 /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
2832 velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
2833 ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
2834 if (mii_status & VELOCITY_SPEED_100) {
2835 if (mii_status & VELOCITY_DUPLEX_FULL)
2836 ANAR |= ANAR_TXFD;
2837 else
2838 ANAR |= ANAR_TX;
2839 } else {
2840 if (mii_status & VELOCITY_DUPLEX_FULL)
2841 ANAR |= ANAR_10FD;
2842 else
2843 ANAR |= ANAR_10;
2844 }
2845 velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
2846 /* enable AUTO-NEGO mode */
2847 mii_set_auto_on(vptr);
2848 /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
2849 }
2850 /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
2851 /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
2852 return VELOCITY_LINK_CHANGE;
2853}
2854
2855/**
2856 * mii_check_media_mode - check media state
2857 * @regs: velocity registers
2858 *
2859 * Check the current MII status and determine the link status
2860 * accordingly
2861 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002862
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863static u32 mii_check_media_mode(struct mac_regs __iomem * regs)
2864{
2865 u32 status = 0;
2866 u16 ANAR;
2867
2868 if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
2869 status |= VELOCITY_LINK_FAIL;
2870
2871 if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
2872 status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
2873 else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
2874 status |= (VELOCITY_SPEED_1000);
2875 else {
2876 velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
2877 if (ANAR & ANAR_TXFD)
2878 status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
2879 else if (ANAR & ANAR_TX)
2880 status |= VELOCITY_SPEED_100;
2881 else if (ANAR & ANAR_10FD)
2882 status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
2883 else
2884 status |= (VELOCITY_SPEED_10);
2885 }
2886
2887 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
2888 velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
2889 if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
2890 == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
2891 if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
2892 status |= VELOCITY_AUTONEG_ENABLE;
2893 }
2894 }
2895
2896 return status;
2897}
2898
2899static u32 check_connection_type(struct mac_regs __iomem * regs)
2900{
2901 u32 status = 0;
2902 u8 PHYSR0;
2903 u16 ANAR;
2904 PHYSR0 = readb(&regs->PHYSR0);
2905
2906 /*
2907 if (!(PHYSR0 & PHYSR0_LINKGD))
2908 status|=VELOCITY_LINK_FAIL;
2909 */
2910
2911 if (PHYSR0 & PHYSR0_FDPX)
2912 status |= VELOCITY_DUPLEX_FULL;
2913
2914 if (PHYSR0 & PHYSR0_SPDG)
2915 status |= VELOCITY_SPEED_1000;
Jay Cliburn59b693f2006-07-20 23:23:57 +02002916 else if (PHYSR0 & PHYSR0_SPD10)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002917 status |= VELOCITY_SPEED_10;
2918 else
2919 status |= VELOCITY_SPEED_100;
2920
2921 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
2922 velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
2923 if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
2924 == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
2925 if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
2926 status |= VELOCITY_AUTONEG_ENABLE;
2927 }
2928 }
2929
2930 return status;
2931}
2932
2933/**
2934 * enable_flow_control_ability - flow control
2935 * @vptr: velocity to configure
2936 *
2937 * Set up flow control according to the flow control options
2938 * determined by the eeprom/configuration.
2939 */
2940
2941static void enable_flow_control_ability(struct velocity_info *vptr)
2942{
2943
2944 struct mac_regs __iomem * regs = vptr->mac_regs;
2945
2946 switch (vptr->options.flow_cntl) {
2947
2948 case FLOW_CNTL_DEFAULT:
2949 if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
2950 writel(CR0_FDXRFCEN, &regs->CR0Set);
2951 else
2952 writel(CR0_FDXRFCEN, &regs->CR0Clr);
2953
2954 if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
2955 writel(CR0_FDXTFCEN, &regs->CR0Set);
2956 else
2957 writel(CR0_FDXTFCEN, &regs->CR0Clr);
2958 break;
2959
2960 case FLOW_CNTL_TX:
2961 writel(CR0_FDXTFCEN, &regs->CR0Set);
2962 writel(CR0_FDXRFCEN, &regs->CR0Clr);
2963 break;
2964
2965 case FLOW_CNTL_RX:
2966 writel(CR0_FDXRFCEN, &regs->CR0Set);
2967 writel(CR0_FDXTFCEN, &regs->CR0Clr);
2968 break;
2969
2970 case FLOW_CNTL_TX_RX:
2971 writel(CR0_FDXTFCEN, &regs->CR0Set);
2972 writel(CR0_FDXRFCEN, &regs->CR0Set);
2973 break;
2974
2975 case FLOW_CNTL_DISABLE:
2976 writel(CR0_FDXRFCEN, &regs->CR0Clr);
2977 writel(CR0_FDXTFCEN, &regs->CR0Clr);
2978 break;
2979
2980 default:
2981 break;
2982 }
2983
2984}
2985
2986
2987/**
2988 * velocity_ethtool_up - pre hook for ethtool
2989 * @dev: network device
2990 *
2991 * Called before an ethtool operation. We need to make sure the
2992 * chip is out of D3 state before we poke at it.
2993 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002994
Linus Torvalds1da177e2005-04-16 15:20:36 -07002995static int velocity_ethtool_up(struct net_device *dev)
2996{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04002997 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998 if (!netif_running(dev))
2999 pci_set_power_state(vptr->pdev, PCI_D0);
3000 return 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003001}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002
3003/**
3004 * velocity_ethtool_down - post hook for ethtool
3005 * @dev: network device
3006 *
3007 * Called after an ethtool operation. Restore the chip back to D3
3008 * state if it isn't running.
3009 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003010
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011static void velocity_ethtool_down(struct net_device *dev)
3012{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003013 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003014 if (!netif_running(dev))
3015 pci_set_power_state(vptr->pdev, PCI_D3hot);
3016}
3017
3018static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3019{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003020 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021 struct mac_regs __iomem * regs = vptr->mac_regs;
3022 u32 status;
3023 status = check_connection_type(vptr->mac_regs);
3024
Jay Cliburn59b693f2006-07-20 23:23:57 +02003025 cmd->supported = SUPPORTED_TP |
3026 SUPPORTED_Autoneg |
3027 SUPPORTED_10baseT_Half |
3028 SUPPORTED_10baseT_Full |
3029 SUPPORTED_100baseT_Half |
3030 SUPPORTED_100baseT_Full |
3031 SUPPORTED_1000baseT_Half |
3032 SUPPORTED_1000baseT_Full;
3033 if (status & VELOCITY_SPEED_1000)
3034 cmd->speed = SPEED_1000;
3035 else if (status & VELOCITY_SPEED_100)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036 cmd->speed = SPEED_100;
3037 else
3038 cmd->speed = SPEED_10;
3039 cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3040 cmd->port = PORT_TP;
3041 cmd->transceiver = XCVR_INTERNAL;
3042 cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
3043
3044 if (status & VELOCITY_DUPLEX_FULL)
3045 cmd->duplex = DUPLEX_FULL;
3046 else
3047 cmd->duplex = DUPLEX_HALF;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003048
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049 return 0;
3050}
3051
3052static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3053{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003054 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055 u32 curr_status;
3056 u32 new_status = 0;
3057 int ret = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003058
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059 curr_status = check_connection_type(vptr->mac_regs);
3060 curr_status &= (~VELOCITY_LINK_FAIL);
3061
3062 new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3063 new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3064 new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3065 new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
3066
3067 if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE)))
3068 ret = -EINVAL;
3069 else
3070 velocity_set_media_mode(vptr, new_status);
3071
3072 return ret;
3073}
3074
3075static u32 velocity_get_link(struct net_device *dev)
3076{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003077 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078 struct mac_regs __iomem * regs = vptr->mac_regs;
Jay Cliburn59b693f2006-07-20 23:23:57 +02003079 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080}
3081
3082static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3083{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003084 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003085 strcpy(info->driver, VELOCITY_NAME);
3086 strcpy(info->version, VELOCITY_VERSION);
3087 strcpy(info->bus_info, pci_name(vptr->pdev));
3088}
3089
3090static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3091{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003092 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093 wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3094 wol->wolopts |= WAKE_MAGIC;
3095 /*
3096 if (vptr->wol_opts & VELOCITY_WOL_PHY)
3097 wol.wolopts|=WAKE_PHY;
3098 */
3099 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3100 wol->wolopts |= WAKE_UCAST;
3101 if (vptr->wol_opts & VELOCITY_WOL_ARP)
3102 wol->wolopts |= WAKE_ARP;
3103 memcpy(&wol->sopass, vptr->wol_passwd, 6);
3104}
3105
3106static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3107{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003108 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003109
3110 if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3111 return -EFAULT;
3112 vptr->wol_opts = VELOCITY_WOL_MAGIC;
3113
3114 /*
3115 if (wol.wolopts & WAKE_PHY) {
3116 vptr->wol_opts|=VELOCITY_WOL_PHY;
3117 vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
3118 }
3119 */
3120
3121 if (wol->wolopts & WAKE_MAGIC) {
3122 vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3123 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3124 }
3125 if (wol->wolopts & WAKE_UCAST) {
3126 vptr->wol_opts |= VELOCITY_WOL_UCAST;
3127 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3128 }
3129 if (wol->wolopts & WAKE_ARP) {
3130 vptr->wol_opts |= VELOCITY_WOL_ARP;
3131 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3132 }
3133 memcpy(vptr->wol_passwd, wol->sopass, 6);
3134 return 0;
3135}
3136
3137static u32 velocity_get_msglevel(struct net_device *dev)
3138{
3139 return msglevel;
3140}
3141
3142static void velocity_set_msglevel(struct net_device *dev, u32 value)
3143{
3144 msglevel = value;
3145}
3146
Jeff Garzik7282d492006-09-13 14:30:00 -04003147static const struct ethtool_ops velocity_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148 .get_settings = velocity_get_settings,
3149 .set_settings = velocity_set_settings,
3150 .get_drvinfo = velocity_get_drvinfo,
3151 .get_wol = velocity_ethtool_get_wol,
3152 .set_wol = velocity_ethtool_set_wol,
3153 .get_msglevel = velocity_get_msglevel,
3154 .set_msglevel = velocity_set_msglevel,
3155 .get_link = velocity_get_link,
3156 .begin = velocity_ethtool_up,
3157 .complete = velocity_ethtool_down
3158};
3159
3160/**
3161 * velocity_mii_ioctl - MII ioctl handler
3162 * @dev: network device
3163 * @ifr: the ifreq block for the ioctl
3164 * @cmd: the command
3165 *
3166 * Process MII requests made via ioctl from the network layer. These
3167 * are used by tools like kudzu to interrogate the link state of the
3168 * hardware
3169 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003170
Linus Torvalds1da177e2005-04-16 15:20:36 -07003171static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3172{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003173 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174 struct mac_regs __iomem * regs = vptr->mac_regs;
3175 unsigned long flags;
3176 struct mii_ioctl_data *miidata = if_mii(ifr);
3177 int err;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003178
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179 switch (cmd) {
3180 case SIOCGMIIPHY:
3181 miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
3182 break;
3183 case SIOCGMIIREG:
3184 if (!capable(CAP_NET_ADMIN))
3185 return -EPERM;
3186 if(velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
3187 return -ETIMEDOUT;
3188 break;
3189 case SIOCSMIIREG:
3190 if (!capable(CAP_NET_ADMIN))
3191 return -EPERM;
3192 spin_lock_irqsave(&vptr->lock, flags);
3193 err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
3194 spin_unlock_irqrestore(&vptr->lock, flags);
3195 check_connection_type(vptr->mac_regs);
3196 if(err)
3197 return err;
3198 break;
3199 default:
3200 return -EOPNOTSUPP;
3201 }
3202 return 0;
3203}
3204
3205#ifdef CONFIG_PM
3206
3207/**
3208 * velocity_save_context - save registers
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003209 * @vptr: velocity
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210 * @context: buffer for stored context
3211 *
3212 * Retrieve the current configuration from the velocity hardware
3213 * and stash it in the context structure, for use by the context
3214 * restore functions. This allows us to save things we need across
3215 * power down states
3216 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003217
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218static void velocity_save_context(struct velocity_info *vptr, struct velocity_context * context)
3219{
3220 struct mac_regs __iomem * regs = vptr->mac_regs;
3221 u16 i;
3222 u8 __iomem *ptr = (u8 __iomem *)regs;
3223
3224 for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3225 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3226
3227 for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3228 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3229
3230 for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3231 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3232
3233}
3234
/**
 * velocity_restore_context - restore registers
 * @vptr: velocity
 * @context: buffer for stored context
 *
 * Reload the register configuration from the velocity context
 * created by velocity_save_context.
 */

static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int i;
	u8 __iomem *ptr = (u8 __iomem *)regs;

	for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
		writel(*((u32 *) (context->mac_reg + i)), ptr + i);

	/*
	 * Just skip cr0. The CRn registers come in SET/CLR pairs, with
	 * each CLR register sitting 4 bytes above its SET partner.
	 */
	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
		/* Clear the bits that should end up zero ... */
		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
		/* ... then set the bits that should end up one */
		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
	}

	for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
		writel(*((u32 *) (context->mac_reg + i)), ptr + i);

	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
		writel(*((u32 *) (context->mac_reg + i)), ptr + i);

	for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
}

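/*
 * Worked example of the SET/CLR idiom above (assuming the usual
 * semantics of these registers: 1 bits written to a SET register are
 * set, 1 bits written to a CLR register are cleared, 0 bits leave
 * the hardware state alone):
 *
 *	saved value v = 0xA6		(1010 0110)
 *	writeb(~v, CLR register)	clears 0101 1001, the bits that must end up 0
 *	writeb(v, SET register)		sets   1010 0110, the bits that must end up 1
 *
 * Together the two writes force the register to exactly v regardless
 * of its contents on entry.
 */
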
/**
 * wol_calc_crc - WOL CRC
 * @size: number of mask bytes (each mask byte covers 8 pattern bytes)
 * @pattern: data pattern
 * @mask_pattern: mask
 *
 * Compute the wake on lan crc hashes for the packet header
 * we are interested in.
 */

static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
{
	u16 crc = 0xFFFF;
	u8 mask;
	int i, j;

	for (i = 0; i < size; i++) {
		mask = mask_pattern[i];

		/* Skip this mask byte if it is zero */
		if (mask == 0x00)
			continue;

		for (j = 0; j < 8; j++) {
			if ((mask & 0x01) == 0) {
				mask >>= 1;
				continue;
			}
			mask >>= 1;
			/* Feed the selected pattern byte into the CRC */
			crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
		}
	}
	/* Finally, invert the result once to get the correct data */
	crc = ~crc;
	return bitrev32(crc) >> 16;
}

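/*
 * Illustrative note: bit n of mask byte i selects pattern byte
 * (i * 8 + n) for inclusion in the CRC. A hypothetical caller
 * matching only the first two bytes of a 16-byte pattern would do:
 *
 *	u8 pattern[16] = { 0x08, 0x06 };	// remaining bytes ignored
 *	u8 mask[2] = { 0x03, 0x00 };		// bits 0-1 of mask byte 0 set
 *	u16 crc = wol_calc_crc(2, pattern, mask);
 *
 * The closing bitrev32(~crc) >> 16 bit-reverses the 16-bit CCITT
 * result, presumably into the order the pattern-match hardware
 * compares against.
 */
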
/**
 * velocity_set_wol - set up for wake on lan
 * @vptr: velocity to set WOL status on
 *
 * Set a card up for wake on lan either by unicast or by
 * ARP packet.
 *
 * FIXME: check static buffer is safe here
 */

static int velocity_set_wol(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	static u8 buf[256];
	int i;

	static u32 mask_pattern[2][4] = {
		{0x00203000, 0x000003C0, 0x00000000, 0x00000000},	/* ARP */
		{0xfffff000, 0xffffffff, 0xffffffff, 0x0000ffff}	/* Magic Packet */
	};
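
	/*
	 * Our reading of the mask words (inferred from the code, not
	 * verified against the datasheet): on a little-endian host,
	 * bit n of the flattened byte mask selects frame byte n for
	 * the pattern match and the CRC below. The ARP row then picks
	 * out bytes 12-13 (the EtherType), byte 21 (the low byte of
	 * the ARP opcode) and bytes 38-41 (the target IP address) -
	 * exactly the fields filled in further down.
	 */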

	writew(0xFFFF, &regs->WOLCRClr);
	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);

	/*
	if (vptr->wol_opts & VELOCITY_WOL_PHY)
		writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
	*/

	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);

	if (vptr->wol_opts & VELOCITY_WOL_ARP) {
		struct arp_packet *arp = (struct arp_packet *) buf;
		u16 crc;

		memset(buf, 0, sizeof(struct arp_packet) + 7);

		for (i = 0; i < 4; i++)
			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);

		arp->type = htons(ETH_P_ARP);
		arp->ar_op = htons(ARPOP_REQUEST);

		memcpy(arp->ar_tip, vptr->ip_addr, 4);

		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
				(u8 *) &mask_pattern[0][0]);

		writew(crc, &regs->PatternCRC[0]);
		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
	}

	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);

	writew(0x0FFF, &regs->WOLSRClr);

	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
			MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);

		MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
	}

	if (vptr->mii_status & VELOCITY_SPEED_1000)
		MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);

	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

	{
		u8 GCR;

		GCR = readb(&regs->CHIPGCR);
		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
		writeb(GCR, &regs->CHIPGCR);
	}

	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
	/* Turn on SWPTAG just before entering power mode */
	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
	/* Go to bed ..... */
	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

	return 0;
}

static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct velocity_info *vptr = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(vptr->dev))
		return 0;

	netif_device_detach(vptr->dev);

	spin_lock_irqsave(&vptr->lock, flags);
	pci_save_state(pdev);
#ifdef ETHTOOL_GWOL
	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
		velocity_get_ip(vptr);
		velocity_save_context(vptr, &vptr->context);
		velocity_shutdown(vptr);
		velocity_set_wol(vptr);
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_set_power_state(pdev, PCI_D3hot);
	} else {
		velocity_save_context(vptr, &vptr->context);
		velocity_shutdown(vptr);
		pci_disable_device(pdev);
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
	}
#else
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
#endif
	spin_unlock_irqrestore(&vptr->lock, flags);
	return 0;
}

static int velocity_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct velocity_info *vptr = netdev_priv(dev);
	unsigned long flags;
	int i;

	if (!netif_running(vptr->dev))
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	mac_wol_reset(vptr->mac_regs);

	spin_lock_irqsave(&vptr->lock, flags);
	velocity_restore_context(vptr, &vptr->context);
	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
	mac_disable_int(vptr->mac_regs);

	velocity_tx_srv(vptr, 0);

	for (i = 0; i < vptr->num_txq; i++) {
		if (vptr->td_used[i])
			mac_tx_queue_wake(vptr->mac_regs, i);
	}

	mac_enable_int(vptr->mac_regs);
	spin_unlock_irqrestore(&vptr->lock, flags);
	netif_device_attach(vptr->dev);

	return 0;
}
#ifdef CONFIG_INET

static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;

	if (ifa) {
		struct net_device *dev = ifa->ifa_dev->dev;
		struct velocity_info *vptr;
		unsigned long flags;

		spin_lock_irqsave(&velocity_dev_list_lock, flags);
		list_for_each_entry(vptr, &velocity_dev_list, list) {
			if (vptr->dev == dev) {
				velocity_get_ip(vptr);
				break;
			}
		}
		spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
	}
	return NOTIFY_DONE;
}

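/*
 * A sketch of how this callback is presumably hooked into the
 * kernel's IPv4 address notifier chain so the WOL ARP pattern tracks
 * address changes. The notifier_block name is an assumption; the
 * actual registration is not part of this section.
 *
 *	static struct notifier_block velocity_inetaddr_notifier = {
 *		.notifier_call = velocity_netdev_event,
 *	};
 *
 *	register_inetaddr_notifier(&velocity_inetaddr_notifier);
 */
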
#endif	/* CONFIG_INET */
#endif	/* CONFIG_PM */