/*
 * This code is derived from the VIA reference driver (copyright message
 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
 * addition to the Linux kernel.
 *
 * The code has been merged into one source file, cleaned up to follow
 * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
 * for 64bit hardware platforms.
 *
 * TODO
 *	rx_copybreak/alignment
 *	More testing
 *
 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
 * Additional fixes and clean up: Francois Romieu
 *
 * This source has not been verified for use in safety critical systems.
 *
 * Please direct queries about the revamped driver to the linux-kernel
 * list not VIA.
 *
 * Original code:
 *
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This software may be redistributed and/or modified under
 * the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Author: Chuang Liang-Shing, AJ Jiang
 *
 * Date: Jan 24, 2003
 *
 * MODULE_LICENSE("GPL");
 *
 */


#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/if.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/inetdevice.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/crc-ccitt.h>
#include <linux/crc32.h>

#include "via-velocity.h"


static int velocity_nics;
static int msglevel = MSG_LEVEL_INFO;

/**
 * mac_get_cam_mask - Read a CAM mask
 * @regs: register block for this velocity
 * @mask: buffer to store mask
 *
 * Fetch the mask bits of the selected CAM and store them into the
 * provided mask buffer.
 */
static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;

	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	writeb(0, &regs->CAMADDR);

	/* read mask */
	for (i = 0; i < 8; i++)
		*mask++ = readb(&(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}


/**
 * mac_set_cam_mask - Set a CAM mask
 * @regs: register block for this velocity
 * @mask: CAM mask to load
 *
 * Store a new mask into a CAM
 */
static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;
	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	writeb(CAMADDR_CAMEN, &regs->CAMADDR);

	for (i = 0; i < 8; i++)
		writeb(*mask++, &(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;
	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);

	for (i = 0; i < 8; i++)
		writeb(*mask++, &(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 * mac_set_cam - set CAM data
 * @regs: register block of this velocity
 * @idx: Cam index
 * @addr: 2 or 6 bytes of CAM data
 *
 * Load an address or vlan tag into a CAM
 */
static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
{
	int i;

	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	idx &= (64 - 1);

	writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);

	for (i = 0; i < 6; i++)
		writeb(*addr++, &(regs->MARCAM[i]));

	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

	udelay(10);

	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}
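
/*
 * Note (summary of the helpers above): each of the 64 CAM entries is
 * gated by the matching bit in the 8-byte masks loaded through
 * mac_set_cam_mask() and mac_set_vlan_cam_mask().
 * velocity_init_cam_filter() and velocity_set_multi() below keep those
 * masks in sync with the entries they program.
 */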

static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
			     const u8 *addr)
{

	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	idx &= (64 - 1);

	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
	writew(*((u16 *) addr), &regs->MARCAM[0]);

	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

	udelay(10);

	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}


/**
 * mac_wol_reset - reset WOL after exiting low power
 * @regs: register block of this velocity
 *
 * Called after we drop out of wake on lan mode in order to
 * reset the Wake on lan features. This function doesn't restore
 * the rest of the logic from the result of sleep/wakeup
 */
static void mac_wol_reset(struct mac_regs __iomem *regs)
{

	/* Turn off SWPTAG right after leaving power mode */
	BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
	/* clear sticky bits */
	BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

	BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
	BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
	/* disable force PME-enable */
	writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
	/* disable power-event config bit */
	writew(0xFFFF, &regs->WOLCRClr);
	/* clear power status */
	writew(0xFFFF, &regs->WOLSRClr);
}

static const struct ethtool_ops velocity_ethtool_ops;

/*
    Define module options
*/

MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");

#define VELOCITY_PARAM(N, D) \
	static int N[MAX_UNITS] = OPTION_DEFAULT;\
	module_param_array(N, int, NULL, 0); \
	MODULE_PARM_DESC(N, D);

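/*
 * Usage sketch (hypothetical values): each option below takes one value
 * per detected adapter, in probe order, e.g.
 *
 *	modprobe via-velocity RxDescriptors=128,64 DMA_length=6
 *
 * Unset entries stay at OPTION_DEFAULT (-1), which selects the
 * per-option default defined below.
 */
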
#define RX_DESC_MIN 64
#define RX_DESC_MAX 255
#define RX_DESC_DEF 64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN 16
#define TX_DESC_MAX 256
#define TX_DESC_DEF 64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");

#define RX_THRESH_MIN 0
#define RX_THRESH_MAX 3
#define RX_THRESH_DEF 0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: indicates the rxfifo threshold is 128 bytes.
   1: indicates the rxfifo threshold is 512 bytes.
   2: indicates the rxfifo threshold is 1024 bytes.
   3: indicates the rxfifo threshold is store & forward.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");

#define DMA_LENGTH_MIN 0
#define DMA_LENGTH_MAX 7
#define DMA_LENGTH_DEF 6

/* DMA_length[] is used for controlling the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF (flush till empty)
   7: SF (flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");

#define IP_ALIG_DEF 0
/* IP_byte_align[] is used for IP header DWORD byte alignment
   0: indicates the IP header won't be DWORD byte aligned. (Default)
   1: indicates the IP header will be DWORD byte aligned.
   In some environments, the IP header should be DWORD byte aligned,
   or the packet will be dropped when we receive it. (eg: IPVS)
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");

#define TX_CSUM_DEF 1
/* txcsum_offload[] is used for setting the checksum offload ability of NIC.
   (We only support RX checksum offload now)
   0: disable checksum offload
   1: enable checksum offload. (Default)
*/
VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload");

#define FLOW_CNTL_DEF 1
#define FLOW_CNTL_MIN 1
#define FLOW_CNTL_MAX 5

/* flow_control[] is used for setting the flow control ability of NIC.
   1: hardware default - AUTO (default). Use hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");

#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 4
/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
   0: indicates autonegotiation for both speed and duplex mode
   1: indicates 100Mbps half duplex mode
   2: indicates 100Mbps full duplex mode
   3: indicates 10Mbps half duplex mode
   4: indicates 10Mbps full duplex mode

   Note:
   if the EEPROM has been set to the force mode, this option is ignored
   by the driver.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define VAL_PKT_LEN_DEF 0
/* ValPktLen[] is used for controlling how frames with an invalid
   layer 2 length are handled.
   0: Receive frames with invalid layer 2 length (Default)
   1: Drop frames with invalid layer 2 length
*/
VELOCITY_PARAM(ValPktLen, "Receive or drop invalid 802.3 frames");

#define WOL_OPT_DEF 0
#define WOL_OPT_MIN 0
#define WOL_OPT_MAX 7
/* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if received a magic packet. (Default)
   1: Wake up if link status is on/off.
   2: Wake up if received an arp packet.
   4: Wake up if received any unicast packet.
   These values can be summed up to support more than one option.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");
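/*
 * For example, using the values listed above: wol_opts=6 (2 + 4) arms
 * wake-up on both ARP and unicast packets.
 */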

static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
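/*
 * Frames shorter than rx_copybreak are intended to be copied into a
 * small freshly-allocated skb on receive so the full-sized ring buffer
 * can be recycled at once (cf. the rx_copybreak item in the TODO at
 * the top of this file).
 */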

/*
 *	Internal board variants. At the moment we have only one
 */
static struct velocity_info_tbl chip_info_table[] = {
	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
	{ }
};

/*
 *	Describe the PCI device identifiers that we support in this
 *	device driver. Used for hotplug autoloading.
 */
static const struct pci_device_id velocity_id_table[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
	{ }
};

MODULE_DEVICE_TABLE(pci, velocity_id_table);

/**
 * get_chip_name - identifier to name
 * @id: chip identifier
 *
 * Given a chip identifier return a suitable description. Returns
 * a pointer to a static string valid while the driver is loaded.
 */
static const char __devinit *get_chip_name(enum chip_type chip_id)
{
	int i;
	for (i = 0; chip_info_table[i].name != NULL; i++)
		if (chip_info_table[i].chip_id == chip_id)
			break;
	return chip_info_table[i].name;
}

/**
 * velocity_remove1 - device unplug
 * @pdev: PCI device being removed
 *
 * Device unload callback. Called on an unplug or on module
 * unload for each active device that is present. Disconnects
 * the device from the network layer and frees all the resources
 */
static void __devexit velocity_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct velocity_info *vptr = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(vptr->mac_regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	velocity_nics--;
}

/**
 * velocity_set_int_opt - parser for integer options
 * @opt: pointer to option value
 * @val: value the user requested (or -1 for default)
 * @min: lowest value allowed
 * @max: highest value allowed
 * @def: default value
 * @name: property name
 * @dev: device name
 *
 * Set an integer property in the module options. This function does
 * all the verification and checking as well as reporting so that
 * we don't duplicate code for each option.
 */
static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
{
	if (val == -1)
		*opt = def;
	else if (val < min || val > max) {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
					devname, name, min, max);
		*opt = def;
	} else {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
					devname, name, val);
		*opt = val;
	}
}

/**
 * velocity_set_bool_opt - parser for boolean options
 * @opt: pointer to option value
 * @val: value the user requested (or -1 for default)
 * @def: default value (yes/no)
 * @flag: numeric value to set for true.
 * @name: property name
 * @dev: device name
 *
 * Set a boolean property in the module options. This function does
 * all the verification and checking as well as reporting so that
 * we don't duplicate code for each option.
 */
static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, const char *devname)
{
	(*opt) &= (~flag);
	if (val == -1)
		*opt |= (def ? flag : 0);
	else if (val < 0 || val > 1) {
		printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
			devname, name);
		*opt |= (def ? flag : 0);
	} else {
		printk(KERN_INFO "%s: set parameter %s to %s\n",
			devname, name, val ? "TRUE" : "FALSE");
		*opt |= (val ? flag : 0);
	}
}

/**
 * velocity_get_options - set options on device
 * @opts: option structure for the device
 * @index: index of option to use in module options array
 * @devname: device name
 *
 * Turn the module and command options into a single structure
 * for the current device
 */
static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
{

	velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
	velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
	velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
	velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);

	velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname);
	velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
	velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
	velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
	velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
	opts->numrx = (opts->numrx & ~3);
}

/**
 * velocity_init_cam_filter - initialise CAM
 * @vptr: velocity to program
 *
 * Initialize the content addressable memory used for filters. Load
 * appropriately according to the presence of VLAN
 */
static void velocity_init_cam_filter(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;

	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

	/* Disable all CAMs */
	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
	mac_set_cam_mask(regs, vptr->mCAMmask);

	/* Enable VCAMs */
	if (vptr->vlgrp) {
		unsigned int vid, i = 0;

		if (!vlan_group_get_device(vptr->vlgrp, 0))
			WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);

		for (vid = 1; (vid < VLAN_VID_MASK); vid++) {
			if (vlan_group_get_device(vptr->vlgrp, vid)) {
				mac_set_vlan_cam(regs, i, (u8 *) &vid);
				vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
				if (++i >= VCAM_SIZE)
					break;
			}
		}
		mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
	}
}

static void velocity_vlan_rx_register(struct net_device *dev,
				      struct vlan_group *grp)
{
	struct velocity_info *vptr = netdev_priv(dev);

	vptr->vlgrp = grp;
}

static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct velocity_info *vptr = netdev_priv(dev);

	spin_lock_irq(&vptr->lock);
	velocity_init_cam_filter(vptr);
	spin_unlock_irq(&vptr->lock);
}

static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct velocity_info *vptr = netdev_priv(dev);

	spin_lock_irq(&vptr->lock);
	vlan_group_set_device(vptr->vlgrp, vid, NULL);
	velocity_init_cam_filter(vptr);
	spin_unlock_irq(&vptr->lock);
}

static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
{
	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
}
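
/*
 * Ring index semantics (as used by the refill path further below):
 * rx.curr is the next descriptor the receive path will examine,
 * rx.dirty is the next slot to refill, and rx.filled counts refilled
 * buffers not yet handed back to the NIC by
 * velocity_give_many_rx_descs().
 */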

/**
 * velocity_rx_reset - handle a receive reset
 * @vptr: velocity we are resetting
 *
 * Reset the ownership and status for the receive ring side.
 * Hand all the receive queue to the NIC.
 */
static void velocity_rx_reset(struct velocity_info *vptr)
{

	struct mac_regs __iomem *regs = vptr->mac_regs;
	int i;

	velocity_init_rx_ring_indexes(vptr);

	/*
	 *	Init state, all RD entries belong to the NIC
	 */
	for (i = 0; i < vptr->options.numrx; ++i)
		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;

	writew(vptr->options.numrx, &regs->RBRDU);
	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
	writew(0, &regs->RDIdx);
	writew(vptr->options.numrx - 1, &regs->RDCSize);
}

/**
 * velocity_get_opt_media_mode - get media selection
 * @vptr: velocity adapter
 *
 * Get the media mode stored in EEPROM or module options and load
 * mii_status accordingly. The requested link state information
 * is also returned.
 */
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
	u32 status = 0;

	switch (vptr->options.spd_dpx) {
	case SPD_DPX_AUTO:
		status = VELOCITY_AUTONEG_ENABLE;
		break;
	case SPD_DPX_100_FULL:
		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
		break;
	case SPD_DPX_10_FULL:
		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
		break;
	case SPD_DPX_100_HALF:
		status = VELOCITY_SPEED_100;
		break;
	case SPD_DPX_10_HALF:
		status = VELOCITY_SPEED_10;
		break;
	}
	vptr->mii_status = status;
	return status;
}

/**
 * safe_disable_mii_autopoll - autopoll off
 * @regs: velocity registers
 *
 * Turn off the autopoll and wait for it to disable on the chip
 */
static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
{
	u16 ww;

	/* turn off MAUTO */
	writeb(0, &regs->MIICR);
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		udelay(1);
		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}
}

/**
 * enable_mii_autopoll - turn on autopolling
 * @regs: velocity registers
 *
 * Enable the MII link status autopoll feature on the Velocity
 * hardware. Wait for it to enable.
 */
static void enable_mii_autopoll(struct mac_regs __iomem *regs)
{
	int ii;

	writeb(0, &(regs->MIICR));
	writeb(MIIADR_SWMPL, &regs->MIIADR);

	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
		udelay(1);
		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}

	writeb(MIICR_MAUTO, &regs->MIICR);

	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
		udelay(1);
		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}

}

/**
 * velocity_mii_read - read MII data
 * @regs: velocity registers
 * @index: MII register index
 * @data: buffer for received data
 *
 * Perform a single read of an MII 16bit register. Returns zero
 * on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
{
	u16 ww;

	/*
	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
	 */
	safe_disable_mii_autopoll(regs);

	writeb(index, &regs->MIIADR);

	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);

	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		if (!(readb(&regs->MIICR) & MIICR_RCMD))
			break;
	}

	*data = readw(&regs->MIIDATA);

	enable_mii_autopoll(regs);
	if (ww == W_MAX_TIMEOUT)
		return -ETIMEDOUT;
	return 0;
}


/**
 * mii_check_media_mode - check media state
 * @regs: velocity registers
 *
 * Check the current MII status and determine the link status
 * accordingly
 */
static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
{
	u32 status = 0;
	u16 ANAR;

	if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
		status |= VELOCITY_LINK_FAIL;

	if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
	else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
		status |= (VELOCITY_SPEED_1000);
	else {
		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
		if (ANAR & ANAR_TXFD)
			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
		else if (ANAR & ANAR_TX)
			status |= VELOCITY_SPEED_100;
		else if (ANAR & ANAR_10FD)
			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
		else
			status |= (VELOCITY_SPEED_10);
	}

	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
		if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
		    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
			if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
				status |= VELOCITY_AUTONEG_ENABLE;
		}
	}

	return status;
}

/**
 * velocity_mii_write - write MII data
 * @regs: velocity registers
 * @index: MII register index
 * @data: 16bit data for the MII register
 *
 * Perform a single write to an MII 16bit register. Returns zero
 * on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
{
	u16 ww;

	/*
	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
	 */
	safe_disable_mii_autopoll(regs);

	/* MII reg offset */
	writeb(mii_addr, &regs->MIIADR);
	/* set MII data */
	writew(data, &regs->MIIDATA);

	/* turn on MIICR_WCMD */
	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);

	/* W_MAX_TIMEOUT is the timeout period */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		udelay(5);
		if (!(readb(&regs->MIICR) & MIICR_WCMD))
			break;
	}
	enable_mii_autopoll(regs);

	if (ww == W_MAX_TIMEOUT)
		return -ETIMEDOUT;
	return 0;
}

/**
 * set_mii_flow_control - flow control setup
 * @vptr: velocity interface
 *
 * Set up the flow control on this interface according to
 * the supplied user/eeprom options.
 */
static void set_mii_flow_control(struct velocity_info *vptr)
{
	/* Enable or Disable PAUSE in ANAR */
	switch (vptr->options.flow_cntl) {
	case FLOW_CNTL_TX:
		MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;

	case FLOW_CNTL_RX:
		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;

	case FLOW_CNTL_TX_RX:
		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;

	case FLOW_CNTL_DISABLE:
		MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;
	default:
		break;
	}
}

/**
 * mii_set_auto_on - autonegotiate on
 * @vptr: velocity
 *
 * Enable autonegotiation on this interface
 */
static void mii_set_auto_on(struct velocity_info *vptr)
{
	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
		MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
	else
		MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
}

static u32 check_connection_type(struct mac_regs __iomem *regs)
{
	u32 status = 0;
	u8 PHYSR0;
	u16 ANAR;
	PHYSR0 = readb(&regs->PHYSR0);

	/*
	   if (!(PHYSR0 & PHYSR0_LINKGD))
	   status|=VELOCITY_LINK_FAIL;
	 */

	if (PHYSR0 & PHYSR0_FDPX)
		status |= VELOCITY_DUPLEX_FULL;

	if (PHYSR0 & PHYSR0_SPDG)
		status |= VELOCITY_SPEED_1000;
	else if (PHYSR0 & PHYSR0_SPD10)
		status |= VELOCITY_SPEED_10;
	else
		status |= VELOCITY_SPEED_100;

	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
		if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
		    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
			if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
				status |= VELOCITY_AUTONEG_ENABLE;
		}
	}

	return status;
}



/**
 * velocity_set_media_mode - set media mode
 * @mii_status: old MII link state
 *
 * Check the media link state and configure the flow control
 * PHY and also velocity hardware setup accordingly. In particular
 * we need to set up CD polling and frame bursting.
 */
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
	u32 curr_status;
	struct mac_regs __iomem *regs = vptr->mac_regs;

	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
	curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);

	/* Set mii link status */
	set_mii_flow_control(vptr);

	/*
	   Check if new status is consistent with current status
	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE)
	       || (mii_status==curr_status)) {
	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
	   vptr->mii_status=check_connection_type(vptr->mac_regs);
	   VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
	   return 0;
	   }
	 */

	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
		MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);

	/*
	 *	If connection type is AUTO
	 */
	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
		VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
		/* clear force MAC mode bit */
		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
		/* set duplex mode of MAC according to duplex mode of MII */
		MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
		MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);

		/* enable AUTO-NEGO mode */
		mii_set_auto_on(vptr);
	} else {
		u16 ANAR;
		u8 CHIPGCR;

		/*
		 * 1. if it's 3119, disable frame bursting in halfduplex mode
		 *    and enable it in fullduplex mode
		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
		 * 3. only enable CD heart beat counter in 10HD mode
		 */

		/* set force MAC mode bit */
		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

		CHIPGCR = readb(&regs->CHIPGCR);
		CHIPGCR &= ~CHIPGCR_FCGMII;

		if (mii_status & VELOCITY_DUPLEX_FULL) {
			CHIPGCR |= CHIPGCR_FCFDX;
			writeb(CHIPGCR, &regs->CHIPGCR);
			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
			if (vptr->rev_id < REV_ID_VT3216_A0)
				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
		} else {
			CHIPGCR &= ~CHIPGCR_FCFDX;
			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
			writeb(CHIPGCR, &regs->CHIPGCR);
			if (vptr->rev_id < REV_ID_VT3216_A0)
				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
		}

		MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);

		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
		else
			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

		/* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
		velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
		ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
		if (mii_status & VELOCITY_SPEED_100) {
			if (mii_status & VELOCITY_DUPLEX_FULL)
				ANAR |= ANAR_TXFD;
			else
				ANAR |= ANAR_TX;
		} else {
			if (mii_status & VELOCITY_DUPLEX_FULL)
				ANAR |= ANAR_10FD;
			else
				ANAR |= ANAR_10;
		}
		velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
		/* enable AUTO-NEGO mode */
		mii_set_auto_on(vptr);
		/* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
	}
	/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
	/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
	return VELOCITY_LINK_CHANGE;
}

/**
 * velocity_print_link_status - link status reporting
 * @vptr: velocity to report on
 *
 * Turn the link status of the velocity card into a kernel log
 * description of the new link state, detailing speed and duplex
 * status
 */
static void velocity_print_link_status(struct velocity_info *vptr)
{

	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
	} else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);

		if (vptr->mii_status & VELOCITY_SPEED_1000)
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
		else if (vptr->mii_status & VELOCITY_SPEED_100)
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
		else
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");

		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
		else
			VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
	} else {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
		switch (vptr->options.spd_dpx) {
		case SPD_DPX_100_HALF:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
			break;
		case SPD_DPX_100_FULL:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
			break;
		case SPD_DPX_10_HALF:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
			break;
		case SPD_DPX_10_FULL:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
			break;
		default:
			break;
		}
	}
}

/**
 * enable_flow_control_ability - flow control
 * @vptr: velocity to configure
 *
 * Set up flow control according to the flow control options
 * determined by the eeprom/configuration.
 */
static void enable_flow_control_ability(struct velocity_info *vptr)
{

	struct mac_regs __iomem *regs = vptr->mac_regs;

	switch (vptr->options.flow_cntl) {

	case FLOW_CNTL_DEFAULT:
		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
			writel(CR0_FDXRFCEN, &regs->CR0Set);
		else
			writel(CR0_FDXRFCEN, &regs->CR0Clr);

		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
			writel(CR0_FDXTFCEN, &regs->CR0Set);
		else
			writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_TX:
		writel(CR0_FDXTFCEN, &regs->CR0Set);
		writel(CR0_FDXRFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_RX:
		writel(CR0_FDXRFCEN, &regs->CR0Set);
		writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_TX_RX:
		writel(CR0_FDXTFCEN, &regs->CR0Set);
		writel(CR0_FDXRFCEN, &regs->CR0Set);
		break;

	case FLOW_CNTL_DISABLE:
		writel(CR0_FDXRFCEN, &regs->CR0Clr);
		writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	default:
		break;
	}

}

/**
 * velocity_soft_reset - soft reset
 * @vptr: velocity to reset
 *
 * Kick off a soft reset of the velocity adapter and then poll
 * until the reset sequence has completed before returning.
 */
static int velocity_soft_reset(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int i = 0;

	writel(CR0_SFRST, &regs->CR0Set);

	for (i = 0; i < W_MAX_TIMEOUT; i++) {
		udelay(5);
		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
			break;
	}

	if (i == W_MAX_TIMEOUT) {
		writel(CR0_FORSRST, &regs->CR0Set);
		/* FIXME: PCI POSTING */
		/* delay 2ms */
		mdelay(2);
	}
	return 0;
}

/**
 * velocity_set_multi - filter list change callback
 * @dev: network device
 *
 * Called by the network layer when the filter lists need to change
 * for a velocity adapter. Reload the CAMs with the new address
 * filter ruleset.
 */
static void velocity_set_multi(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	struct mac_regs __iomem *regs = vptr->mac_regs;
	u8 rx_mode;
	int i;
	struct dev_mc_list *mclist;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		writel(0xffffffff, &regs->MARCAM[0]);
		writel(0xffffffff, &regs->MARCAM[4]);
		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
	} else if ((dev->mc_count > vptr->multicast_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		writel(0xffffffff, &regs->MARCAM[0]);
		writel(0xffffffff, &regs->MARCAM[4]);
		rx_mode = (RCR_AM | RCR_AB);
	} else {
		int offset = MCAM_SIZE - vptr->multicast_limit;
		mac_get_cam_mask(regs, vptr->mCAMmask);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
			mac_set_cam(regs, i + offset, mclist->dmi_addr);
			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
		}

		mac_set_cam_mask(regs, vptr->mCAMmask);
		rx_mode = RCR_AM | RCR_AB | RCR_AP;
	}
	if (dev->mtu > 1500)
		rx_mode |= RCR_AL;

	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);

}

/*
 *	MII access, media link mode setting functions
 */

/**
 * mii_init - set up MII
 * @vptr: velocity adapter
 * @mii_status: link status
 *
 * Set up the PHY for the current link state.
 */
static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
	u16 BMCR;

	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
	case PHYID_CICADA_CS8201:
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
		/*
		 *	Turn on the ECHODIS bit in NWay-forced full mode and turn
		 *	it off in NWay-forced half mode, to work around the
		 *	NWay-forced vs. legacy-forced issue.
		 */
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
		/*
		 *	Turn on Link/Activity LED enable bit for CIS8201
		 */
		MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
		break;
	case PHYID_VT3216_32BIT:
	case PHYID_VT3216_64BIT:
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
		/*
		 *	Turn on the ECHODIS bit in NWay-forced full mode and turn
		 *	it off in NWay-forced half mode, to work around the
		 *	NWay-forced vs. legacy-forced issue
		 */
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
		break;

	case PHYID_MARVELL_1000:
	case PHYID_MARVELL_1000S:
		/*
		 *	Assert CRS on Transmit
		 */
		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
		break;
	default:
		;
	}
	velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
	if (BMCR & BMCR_ISO) {
		BMCR &= ~BMCR_ISO;
		velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
	}
}

/**
 * setup_queue_timers - Setup interrupt timers
 * @vptr: velocity adapter
 *
 * Setup interrupt frequency during suppression (timeout if the frame
 * count isn't filled).
 */
static void setup_queue_timers(struct velocity_info *vptr)
{
	/* Only for newer revisions */
	if (vptr->rev_id >= REV_ID_VT3216_A0) {
		u8 txqueue_timer = 0;
		u8 rxqueue_timer = 0;

		if (vptr->mii_status & (VELOCITY_SPEED_1000 |
				VELOCITY_SPEED_100)) {
			txqueue_timer = vptr->options.txqueue_timer;
			rxqueue_timer = vptr->options.rxqueue_timer;
		}

		writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
		writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
	}
}
/**
 * setup_adaptive_interrupts - Setup interrupt suppression
 * @vptr: velocity adapter
 *
 * The velocity is able to suppress interrupts during high interrupt load.
 * This function turns on that feature.
 */
static void setup_adaptive_interrupts(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	u16 tx_intsup = vptr->options.tx_intsup;
	u16 rx_intsup = vptr->options.rx_intsup;

	/* Setup default interrupt mask (will be changed below) */
	vptr->int_mask = INT_MASK_DEF;

	/* Set Tx Interrupt Suppression Threshold */
	writeb(CAMCR_PS0, &regs->CAMCR);
	if (tx_intsup != 0) {
		vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
				ISR_PTX2I | ISR_PTX3I);
		writew(tx_intsup, &regs->ISRCTL);
	} else
		writew(ISRCTL_TSUPDIS, &regs->ISRCTL);

	/* Set Rx Interrupt Suppression Threshold */
	writeb(CAMCR_PS1, &regs->CAMCR);
	if (rx_intsup != 0) {
		vptr->int_mask &= ~ISR_PRXI;
		writew(rx_intsup, &regs->ISRCTL);
	} else
		writew(ISRCTL_RSUPDIS, &regs->ISRCTL);

	/* Select page to interrupt hold timer */
	writeb(0, &regs->CAMCR);
}

/**
 * velocity_init_registers - initialise MAC registers
 * @vptr: velocity to init
 * @type: type of initialisation (hot or cold)
 *
 * Initialise the MAC on a reset or on first set up on the
 * hardware.
 */
static void velocity_init_registers(struct velocity_info *vptr,
				    enum velocity_init_type type)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int i, mii_status;

	mac_wol_reset(regs);

	switch (type) {
	case VELOCITY_INIT_RESET:
	case VELOCITY_INIT_WOL:

		netif_stop_queue(vptr->dev);

		/*
		 *	Reset RX to prevent RX pointer not on the 4X location
		 */
		velocity_rx_reset(vptr);
		mac_rx_queue_run(regs);
		mac_rx_queue_wake(regs);

		mii_status = velocity_get_opt_media_mode(vptr);
		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
			velocity_print_link_status(vptr);
			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
				netif_wake_queue(vptr->dev);
		}

		enable_flow_control_ability(vptr);

		mac_clear_isr(regs);
		writel(CR0_STOP, &regs->CR0Clr);
		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
		       &regs->CR0Set);

		break;

	case VELOCITY_INIT_COLD:
	default:
		/*
		 *	Do reset
		 */
		velocity_soft_reset(vptr);
		mdelay(5);

		mac_eeprom_reload(regs);
		for (i = 0; i < 6; i++)
			writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));

		/*
		 *	clear Pre_ACPI bit.
		 */
		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
		mac_set_dma_length(regs, vptr->options.DMA_length);

		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
		/*
		 *	Back off algorithm use original IEEE standard
		 */
		BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);

		/*
		 *	Init CAM filter
		 */
		velocity_init_cam_filter(vptr);

		/*
		 *	Set packet filter: Receive directed and broadcast address
		 */
		velocity_set_multi(vptr->dev);

		/*
		 *	Enable MII auto-polling
		 */
		enable_mii_autopoll(regs);

		setup_adaptive_interrupts(vptr);

		writel(vptr->rx.pool_dma, &regs->RDBaseLo);
		writew(vptr->options.numrx - 1, &regs->RDCSize);
		mac_rx_queue_run(regs);
		mac_rx_queue_wake(regs);

		writew(vptr->options.numtx - 1, &regs->TDCSize);

		for (i = 0; i < vptr->tx.numq; i++) {
			writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
			mac_tx_queue_run(regs, i);
		}

		init_flow_control_register(vptr);

		writel(CR0_STOP, &regs->CR0Clr);
		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);

		mii_status = velocity_get_opt_media_mode(vptr);
		netif_stop_queue(vptr->dev);

		mii_init(vptr, mii_status);

		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
			velocity_print_link_status(vptr);
			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
				netif_wake_queue(vptr->dev);
		}

		enable_flow_control_ability(vptr);
		mac_hw_mibs_init(regs);
		mac_write_int_mask(vptr->int_mask, regs);
		mac_clear_isr(regs);

	}
}

static void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int avail, dirty, unusable;

	/*
	 * RD number must be equal to 4X per hardware spec
	 * (programming guide rev 1.20, p.13)
	 */
	if (vptr->rx.filled < 4)
		return;

	wmb();

	unusable = vptr->rx.filled & 0x0003;
	dirty = vptr->rx.dirty - unusable;
	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
	}

	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
	vptr->rx.filled = unusable;
}

/**
 * velocity_init_dma_rings - set up DMA rings
 * @vptr: Velocity to set up
 *
 * Allocate PCI mapped DMA rings for the receive and transmit layer
 * to use.
 */
static int velocity_init_dma_rings(struct velocity_info *vptr)
{
	struct velocity_opt *opt = &vptr->options;
	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
	struct pci_dev *pdev = vptr->pdev;
	dma_addr_t pool_dma;
	void *pool;
	unsigned int i;

	/*
	 * Allocate all RD/TD rings in a single pool.
	 *
	 * pci_alloc_consistent() fulfills the requirement for 64 bytes
	 * alignment
	 */
	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
				    rx_ring_size, &pool_dma);
	if (!pool) {
		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
			vptr->dev->name);
		return -ENOMEM;
	}

	vptr->rx.ring = pool;
	vptr->rx.pool_dma = pool_dma;

	pool += rx_ring_size;
	pool_dma += rx_ring_size;

	for (i = 0; i < vptr->tx.numq; i++) {
		vptr->tx.rings[i] = pool;
		vptr->tx.pool_dma[i] = pool_dma;
		pool += tx_ring_size;
		pool_dma += tx_ring_size;
	}

	return 0;
}
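
/*
 * Resulting pool layout (from the pointer arithmetic above): the RX
 * ring comes first, followed by the tx.numq TX rings, all carved from
 * the single pci_alloc_consistent() allocation made above.
 */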

static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
{
	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
}

/**
 * velocity_alloc_rx_buf - allocate aligned receive buffer
 * @vptr: velocity
 * @idx: ring index
 *
 * Allocate a new full sized buffer for the reception of a frame and
 * map it into PCI space for the hardware to use. The hardware
 * requires *64* byte alignment of the buffer which makes life
 * less fun than would be ideal.
 */
static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
{
	struct rx_desc *rd = &(vptr->rx.ring[idx]);
	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);

	rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
	if (rd_info->skb == NULL)
		return -ENOMEM;

	/*
	 *	Do the gymnastics to get the buffer head for data at
	 *	64byte alignment.
	 */
	skb_reserve(rd_info->skb,
			64 - ((unsigned long) rd_info->skb->data & 63));
	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
					vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);

	/*
	 *	Fill in the descriptor to match
	 */

	*((u32 *) & (rd->rdesc0)) = 0;
	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
	rd->pa_high = 0;
	return 0;
}


static int velocity_rx_refill(struct velocity_info *vptr)
{
	int dirty = vptr->rx.dirty, done = 0;

	do {
		struct rx_desc *rd = vptr->rx.ring + dirty;

		/* Fine for an all zero Rx desc at init time as well */
		if (rd->rdesc0.len & OWNED_BY_NIC)
			break;

		if (!vptr->rx.info[dirty].skb) {
			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
				break;
		}
		done++;
		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
	} while (dirty != vptr->rx.curr);

	if (done) {
		vptr->rx.dirty = dirty;
		vptr->rx.filled += done;
	}

	return done;
}

/**
 * velocity_free_rd_ring - free receive ring
 * @vptr: velocity to clean up
 *
 * Free the receive buffers for each ring slot and any
 * attached socket buffers that need to go away.
 */
static void velocity_free_rd_ring(struct velocity_info *vptr)
{
	int i;

	if (vptr->rx.info == NULL)
		return;

	for (i = 0; i < vptr->options.numrx; i++) {
		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
		struct rx_desc *rd = vptr->rx.ring + i;

		memset(rd, 0, sizeof(*rd));

		if (!rd_info->skb)
			continue;
		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
				 PCI_DMA_FROMDEVICE);
		rd_info->skb_dma = 0;

		dev_kfree_skb(rd_info->skb);
		rd_info->skb = NULL;
	}

	kfree(vptr->rx.info);
	vptr->rx.info = NULL;
}


1605/**
1606 * velocity_init_rd_ring - set up receive ring
1607 * @vptr: velocity to configure
1608 *
1609 * Allocate and set up the receive buffers for each ring slot and
1610 * assign them to the network adapter.
1611 */
1612static int velocity_init_rd_ring(struct velocity_info *vptr)
1613{
1614 int ret = -ENOMEM;
1615
1616 vptr->rx.info = kcalloc(vptr->options.numrx,
1617 sizeof(struct velocity_rd_info), GFP_KERNEL);
1618 if (!vptr->rx.info)
1619 goto out;
1620
1621 velocity_init_rx_ring_indexes(vptr);
1622
1623 if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1624 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1625 "%s: failed to allocate RX buffer.\n", vptr->dev->name);
1626 velocity_free_rd_ring(vptr);
1627 goto out;
1628 }
1629
1630 ret = 0;
1631out:
1632 return ret;
1633}
1634
1635/**
1636 * velocity_init_td_ring - set up transmit ring
1637 * @vptr: velocity
1638 *
1639 * Set up the transmit ring and chain the ring pointers together.
1640 * Returns zero on success or a negative posix errno code for
1641 * failure.
1642 */
1643static int velocity_init_td_ring(struct velocity_info *vptr)
1644{
Dave Jones2cf71d22009-07-23 18:11:12 -07001645 int j;
1646
1647 /* Init the TD ring entries */
1648 for (j = 0; j < vptr->tx.numq; j++) {
Dave Jones2cf71d22009-07-23 18:11:12 -07001649
1650 vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1651 sizeof(struct velocity_td_info),
1652 GFP_KERNEL);
1653 if (!vptr->tx.infos[j]) {
1654 while (--j >= 0)
1655 kfree(vptr->tx.infos[j]);
1656 return -ENOMEM;
1657 }
1658
1659 vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 }
1661 return 0;
1662}
1663
Dave Jones2cf71d22009-07-23 18:11:12 -07001664/**
1665 * velocity_free_dma_rings - free PCI ring pointers
1666 * @vptr: Velocity to free from
1667 *
1668 * Clean up the PCI ring buffers allocated to this velocity.
1669 */
1670static void velocity_free_dma_rings(struct velocity_info *vptr)
1671{
1672 const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1673 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1674
1675 pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
1676}
1677
1678
1679static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1680{
1681 int ret;
1682
1683 velocity_set_rxbufsize(vptr, mtu);
1684
1685 ret = velocity_init_dma_rings(vptr);
1686 if (ret < 0)
1687 goto out;
1688
1689 ret = velocity_init_rd_ring(vptr);
1690 if (ret < 0)
1691 goto err_free_dma_rings_0;
1692
1693 ret = velocity_init_td_ring(vptr);
1694 if (ret < 0)
1695 goto err_free_rd_ring_1;
1696out:
1697 return ret;
1698
1699err_free_rd_ring_1:
1700 velocity_free_rd_ring(vptr);
1701err_free_dma_rings_0:
1702 velocity_free_dma_rings(vptr);
1703 goto out;
1704}
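
/*
 * velocity_init_rings() above uses the kernel's usual goto ladder: each
 * failure label undoes exactly the steps that succeeded before it, in
 * reverse order. A minimal sketch of the shape; demo_init_a(),
 * demo_init_b() and demo_free_a() are invented placeholders.
 */
static int demo_init_a(void);
static int demo_init_b(void);
static void demo_free_a(void);

static int demo_init_chain(void)
{
	int ret;

	ret = demo_init_a();
	if (ret < 0)
		goto out;

	ret = demo_init_b();
	if (ret < 0)
		goto err_free_a;
out:
	return ret;

err_free_a:
	demo_free_a();		/* step B failed: only undo step A */
	goto out;
}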
1705
1706/**
1707 * velocity_free_tx_buf - free transmit buffer
1708 * @vptr: velocity
1709 * @tdinfo: buffer
1710 *
1711 * Release a transmit buffer. If the buffer was preallocated then
1712 * recycle it; if not, unmap the buffer.
1713 */
Simon Kagstromc79992f2009-11-25 22:10:43 +00001714static void velocity_free_tx_buf(struct velocity_info *vptr,
1715 struct velocity_td_info *tdinfo, struct tx_desc *td)
Dave Jones2cf71d22009-07-23 18:11:12 -07001716{
1717 struct sk_buff *skb = tdinfo->skb;
Dave Jones2cf71d22009-07-23 18:11:12 -07001718
1719 /*
1720 * Don't unmap the pre-allocated tx_bufs
1721 */
1722 if (tdinfo->skb_dma) {
Simon Kagstromc79992f2009-11-25 22:10:43 +00001723 int i;
Dave Jones2cf71d22009-07-23 18:11:12 -07001724
Dave Jones2cf71d22009-07-23 18:11:12 -07001725 for (i = 0; i < tdinfo->nskb_dma; i++) {
Simon Kagstromc79992f2009-11-25 22:10:43 +00001726 size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1727
1728 /* For scatter-gather */
1729 if (skb_shinfo(skb)->nr_frags > 0)
1730 				pktlen = max_t(size_t, pktlen,
1731 					le16_to_cpu(td->td_buf[i].size) & ~TD_QUEUE);
1732 
1733 			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
1734 					 pktlen, PCI_DMA_TODEVICE);
Dave Jones2cf71d22009-07-23 18:11:12 -07001735 }
1736 }
1737 dev_kfree_skb_irq(skb);
1738 tdinfo->skb = NULL;
1739}
1740
1741
1742/*
1743 * FIXME: could we merge this with velocity_free_tx_buf ?
1744 */
1745static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1746 int q, int n)
1747{
1748 struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1749 int i;
1750
1751 if (td_info == NULL)
1752 return;
1753
1754 if (td_info->skb) {
1755 for (i = 0; i < td_info->nskb_dma; i++) {
1756 if (td_info->skb_dma[i]) {
1757 pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
1758 td_info->skb->len, PCI_DMA_TODEVICE);
1759 td_info->skb_dma[i] = 0;
1760 }
1761 }
1762 dev_kfree_skb(td_info->skb);
1763 td_info->skb = NULL;
1764 }
1765}
1766
1767/**
1768 * velocity_free_td_ring - free td ring
1769 * @vptr: velocity
1770 *
1771 * Free up the transmit ring for this particular velocity adapter.
1772 * We free the ring contents but not the ring itself.
1773 */
1774static void velocity_free_td_ring(struct velocity_info *vptr)
1775{
1776 int i, j;
1777
1778 for (j = 0; j < vptr->tx.numq; j++) {
1779 if (vptr->tx.infos[j] == NULL)
1780 continue;
1781 for (i = 0; i < vptr->options.numtx; i++)
1782 velocity_free_td_ring_entry(vptr, j, i);
1783
1784 kfree(vptr->tx.infos[j]);
1785 vptr->tx.infos[j] = NULL;
1786 }
1787}
1788
1789
1790static void velocity_free_rings(struct velocity_info *vptr)
1791{
1792 velocity_free_td_ring(vptr);
1793 velocity_free_rd_ring(vptr);
1794 velocity_free_dma_rings(vptr);
1795}
1796
1797/**
1798 * velocity_error - handle error from controller
1799 * @vptr: velocity
1800 * @status: card status
1801 *
1802 * Process an error report from the hardware and attempt to recover
1803 * the card itself. At the moment we cannot recover from some
1804 * theoretically impossible errors, but this could be fixed using
1805 * the pci_device_failed logic to bounce the hardware.
1806 *
1807 */
1808static void velocity_error(struct velocity_info *vptr, int status)
1809{
1810
1811 if (status & ISR_TXSTLI) {
1812 struct mac_regs __iomem *regs = vptr->mac_regs;
1813
1814 printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1815 BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1816 writew(TRDCSR_RUN, &regs->TDCSRClr);
1817 netif_stop_queue(vptr->dev);
1818
1819 /* FIXME: port over the pci_device_failed code and use it
1820 here */
1821 }
1822
1823 if (status & ISR_SRCI) {
1824 struct mac_regs __iomem *regs = vptr->mac_regs;
1825 int linked;
1826
1827 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1828 vptr->mii_status = check_connection_type(regs);
1829
1830 /*
1831 * If it is a 3119, disable frame bursting in
1832 * halfduplex mode and enable it in fullduplex
1833 * mode
1834 */
1835 if (vptr->rev_id < REV_ID_VT3216_A0) {
David S. Miller6cdee2f2009-09-02 00:32:56 -07001836 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
Dave Jones2cf71d22009-07-23 18:11:12 -07001837 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1838 else
1839 BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1840 }
1841 /*
1842 * Only enable CD heart beat counter in 10HD mode
1843 */
1844 if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1845 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1846 else
1847 BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
Simon Kagstrom6dfc4b92009-11-25 22:10:12 +00001848
1849 setup_queue_timers(vptr);
Dave Jones2cf71d22009-07-23 18:11:12 -07001850 }
1851 /*
1852 * Get link status from PHYSR0
1853 */
1854 linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1855
1856 if (linked) {
1857 vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1858 netif_carrier_on(vptr->dev);
1859 } else {
1860 vptr->mii_status |= VELOCITY_LINK_FAIL;
1861 netif_carrier_off(vptr->dev);
1862 }
1863
1864 velocity_print_link_status(vptr);
1865 enable_flow_control_ability(vptr);
1866
1867 /*
1868 * Re-enable auto-polling because SRCI will disable
1869 * auto-polling
1870 */
1871
1872 enable_mii_autopoll(regs);
1873
1874 if (vptr->mii_status & VELOCITY_LINK_FAIL)
1875 netif_stop_queue(vptr->dev);
1876 else
1877 netif_wake_queue(vptr->dev);
1878
1879 	}
1880 if (status & ISR_MIBFI)
1881 velocity_update_hw_mibs(vptr);
1882 if (status & ISR_LSTEI)
1883 mac_rx_queue_wake(vptr->mac_regs);
1884}
1885
1886/**
1887 * velocity_tx_srv - transmit interrupt service
1888 * @vptr: velocity
1889 * @status: adapter status (unused)
1890 *
1891 * Scan the queues looking for transmitted packets that
1892 * we can complete and clean up. Update any statistics as
1893 * necessary.
1894 */
1895static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
1896{
1897 struct tx_desc *td;
1898 int qnum;
1899 int full = 0;
1900 int idx;
1901 int works = 0;
1902 struct velocity_td_info *tdinfo;
1903 struct net_device_stats *stats = &vptr->dev->stats;
1904
1905 for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1906 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1907 idx = (idx + 1) % vptr->options.numtx) {
1908
1909 /*
1910 * Get Tx Descriptor
1911 */
1912 td = &(vptr->tx.rings[qnum][idx]);
1913 tdinfo = &(vptr->tx.infos[qnum][idx]);
1914
1915 if (td->tdesc0.len & OWNED_BY_NIC)
1916 break;
1917
1918 			if (works++ > 15)
1919 break;
1920
1921 if (td->tdesc0.TSR & TSR0_TERR) {
1922 stats->tx_errors++;
1923 stats->tx_dropped++;
1924 if (td->tdesc0.TSR & TSR0_CDH)
1925 stats->tx_heartbeat_errors++;
1926 if (td->tdesc0.TSR & TSR0_CRS)
1927 stats->tx_carrier_errors++;
1928 if (td->tdesc0.TSR & TSR0_ABT)
1929 stats->tx_aborted_errors++;
1930 if (td->tdesc0.TSR & TSR0_OWC)
1931 stats->tx_window_errors++;
1932 } else {
1933 stats->tx_packets++;
1934 stats->tx_bytes += tdinfo->skb->len;
1935 }
Simon Kagstromc79992f2009-11-25 22:10:43 +00001936 velocity_free_tx_buf(vptr, tdinfo, td);
Dave Jones2cf71d22009-07-23 18:11:12 -07001937 vptr->tx.used[qnum]--;
1938 }
1939 vptr->tx.tail[qnum] = idx;
1940
1941 if (AVAIL_TD(vptr, qnum) < 1)
1942 full = 1;
1943 }
1944 /*
1945 	 * Look to see if we should kick the network layer for
1946 	 * more transmit work.
1947 */
1948 	if (netif_queue_stopped(vptr->dev) && (full == 0) &&
1949 	    (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1950 netif_wake_queue(vptr->dev);
1951 }
1952 return works;
1953}
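
/*
 * The TSR decode in velocity_tx_srv() is a plain bit-to-counter
 * mapping. Written table-driven it looks like the sketch below; the
 * demo_* names (and the idea of a table) are this example's, not the
 * driver's.
 */
struct demo_err_map {
	unsigned int bit;		/* TSR status bit */
	unsigned long *counter;		/* stat to bump when it is set */
};

static int demo_count_tx_errors(unsigned int tsr,
				const struct demo_err_map *map, int n)
{
	int i, errors = 0;

	for (i = 0; i < n; i++) {
		if (tsr & map[i].bit) {
			(*map[i].counter)++;
			errors = 1;
		}
	}
	return errors;			/* non-zero: count as tx_errors */
}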
1954
1955/**
1956 * velocity_rx_csum - checksum process
1957 * @rd: receive packet descriptor
1958 * @skb: network layer packet buffer
1959 *
1960 * Process the status bits for the received packet and determine
1961 * if the checksum was computed and verified by the hardware
1962 */
1963static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1964{
1965 skb->ip_summed = CHECKSUM_NONE;
1966
1967 if (rd->rdesc1.CSM & CSM_IPKT) {
1968 if (rd->rdesc1.CSM & CSM_IPOK) {
1969 if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1970 (rd->rdesc1.CSM & CSM_UDPKT)) {
1971 if (!(rd->rdesc1.CSM & CSM_TUPOK))
1972 return;
1973 }
1974 skb->ip_summed = CHECKSUM_UNNECESSARY;
1975 }
1976 }
1977}
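
/*
 * The same decision pulled out as a plain predicate: the hardware
 * result is only trusted when the IP check passed and, for TCP/UDP
 * payloads, the transport check passed as well. The DEMO_* flag values
 * are invented for the sketch; only their roles mirror the CSM_* bits.
 */
#define DEMO_IPKT	0x01	/* IP packet seen */
#define DEMO_IPOK	0x02	/* IP header checksum good */
#define DEMO_TCPKT	0x04	/* TCP payload */
#define DEMO_UDPKT	0x08	/* UDP payload */
#define DEMO_TUPOK	0x10	/* TCP/UDP checksum good */

static int demo_csum_unnecessary(unsigned int csm)
{
	if (!(csm & DEMO_IPKT) || !(csm & DEMO_IPOK))
		return 0;
	if ((csm & (DEMO_TCPKT | DEMO_UDPKT)) && !(csm & DEMO_TUPOK))
		return 0;
	return 1;			/* CHECKSUM_UNNECESSARY */
}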
1978
1979/**
1980 * velocity_rx_copy - in place Rx copy for small packets
1981 * @rx_skb: network layer packet buffer candidate
1982 * @pkt_size: received data size
1983 * @rd: receive packet descriptor
1984 * @dev: network device
1985 *
1986 * Replace the current skb that is scheduled for Rx processing by a
1987 * shorter, immediately allocated skb, if the received packet is small
1988 * enough. This function returns a negative value if the received
1989 * packet is too big or if memory is exhausted.
1990 */
1991static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1992 struct velocity_info *vptr)
1993{
1994 int ret = -1;
1995 if (pkt_size < rx_copybreak) {
1996 struct sk_buff *new_skb;
1997
Eric Dumazet89d71a62009-10-13 05:34:20 +00001998 new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
Dave Jones2cf71d22009-07-23 18:11:12 -07001999 if (new_skb) {
2000 new_skb->ip_summed = rx_skb[0]->ip_summed;
Dave Jones2cf71d22009-07-23 18:11:12 -07002001 skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
2002 *rx_skb = new_skb;
2003 ret = 0;
2004 }
2005
2006 }
2007 return ret;
2008}
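
/*
 * Copybreak trades one memcpy for keeping the large DMA-mapped buffer
 * on the rx ring. A userspace-flavoured sketch of that decision; the
 * demo_copybreak threshold and names are invented, and malloc/memcpy
 * stand in for netdev_alloc_skb_ip_align()/skb_copy_from_linear_data().
 */
#include <stdlib.h>
#include <string.h>

static const int demo_copybreak = 256;	/* bytes */

static void *demo_rx_copy(void *big_buf, int pkt_size, int *recycled)
{
	void *copy;

	*recycled = 0;
	if (pkt_size >= demo_copybreak)
		return big_buf;		/* big packet: hand it up whole */

	copy = malloc(pkt_size);
	if (!copy)
		return big_buf;		/* no memory: fall back */

	memcpy(copy, big_buf, pkt_size);
	*recycled = 1;			/* big_buf stays on the ring */
	return copy;
}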
2009
2010/**
2011 * velocity_iph_realign - IP header alignment
2012 * @vptr: velocity we are handling
2013 * @skb: network layer packet buffer
2014 * @pkt_size: received data size
2015 *
2016 * Align the IP header on a 2-byte boundary. This behavior can be
2017 * configured by the user.
2018 */
2019static inline void velocity_iph_realign(struct velocity_info *vptr,
2020 struct sk_buff *skb, int pkt_size)
2021{
2022 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2023 memmove(skb->data + 2, skb->data, pkt_size);
2024 skb_reserve(skb, 2);
2025 }
2026}
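
/*
 * Why a 2-byte shuffle: the 14-byte Ethernet header leaves the IP
 * header half-word aligned when the frame lands on a 4-byte boundary.
 * Moving the payload up by 2 puts the IP header at offset 16, a
 * multiple of 4. Stand-alone sketch of the arithmetic:
 */
#include <string.h>

#define DEMO_ETH_HLEN	14	/* 6 dst + 6 src + 2 type */

static void demo_iph_realign(unsigned char *base, int pkt_size)
{
	/* base is 4-byte aligned; base + 2 + DEMO_ETH_HLEN == base + 16 */
	memmove(base + 2, base, pkt_size);
}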
2027
2028
2029/**
2030 * velocity_receive_frame - received packet processor
2031 * @vptr: velocity we are handling
2032 * @idx: ring index
2033 *
2034 * A packet has arrived. We process the packet and, if appropriate,
2035 * pass the frame up the network stack.
2036 */
2037static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2038{
2039 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
2040 struct net_device_stats *stats = &vptr->dev->stats;
2041 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2042 struct rx_desc *rd = &(vptr->rx.ring[idx]);
2043 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2044 struct sk_buff *skb;
2045
2046 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
2047 		VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
2048 stats->rx_length_errors++;
2049 return -EINVAL;
2050 }
2051
2052 if (rd->rdesc0.RSR & RSR_MAR)
2053 stats->multicast++;
2054
2055 skb = rd_info->skb;
2056
2057 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
2058 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
2059
2060 /*
2061 	 * Drop frames not meeting IEEE 802.3
2062 */
2063
2064 if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
2065 if (rd->rdesc0.RSR & RSR_RL) {
2066 stats->rx_length_errors++;
2067 return -EINVAL;
2068 }
2069 }
2070
2071 pci_action = pci_dma_sync_single_for_device;
2072
2073 velocity_rx_csum(rd, skb);
2074
2075 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2076 velocity_iph_realign(vptr, skb, pkt_len);
2077 pci_action = pci_unmap_single;
2078 rd_info->skb = NULL;
2079 }
2080
2081 pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
2082 PCI_DMA_FROMDEVICE);
2083
2084 skb_put(skb, pkt_len - 4);
2085 skb->protocol = eth_type_trans(skb, vptr->dev);
2086
2087 if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
2088 vlan_hwaccel_rx(skb, vptr->vlgrp,
2089 swab16(le16_to_cpu(rd->rdesc1.PQTAG)));
2090 } else
2091 netif_rx(skb);
2092
2093 stats->rx_bytes += pkt_len;
2094
2095 return 0;
2096}
2097
2098
2099/**
2100 * velocity_rx_srv - service RX interrupt
2101 * @vptr: velocity
2102 * @status: adapter status (unused)
2103 *
2104 * Walk the receive ring of the velocity adapter and remove
2105 * any received packets from the receive queue. Hand the ring
2106 * slots back to the adapter for reuse.
2107 */
Simon Kagstromdfff7142009-11-25 22:10:26 +00002108static int velocity_rx_srv(struct velocity_info *vptr, int status,
2109 int budget_left)
Dave Jones2cf71d22009-07-23 18:11:12 -07002110{
2111 struct net_device_stats *stats = &vptr->dev->stats;
2112 int rd_curr = vptr->rx.curr;
2113 int works = 0;
2114
Simon Kagstromdfff7142009-11-25 22:10:26 +00002115 while (works < budget_left) {
Dave Jones2cf71d22009-07-23 18:11:12 -07002116 struct rx_desc *rd = vptr->rx.ring + rd_curr;
2117
2118 if (!vptr->rx.info[rd_curr].skb)
2119 break;
2120
2121 if (rd->rdesc0.len & OWNED_BY_NIC)
2122 break;
2123
2124 rmb();
2125
2126 /*
2127 		 * Don't drop CE or RL error frames although RXOK is off
2128 */
2129 if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2130 if (velocity_receive_frame(vptr, rd_curr) < 0)
2131 stats->rx_dropped++;
2132 } else {
2133 if (rd->rdesc0.RSR & RSR_CRC)
2134 stats->rx_crc_errors++;
2135 if (rd->rdesc0.RSR & RSR_FAE)
2136 stats->rx_frame_errors++;
2137
2138 stats->rx_dropped++;
2139 }
2140
2141 rd->size |= RX_INTEN;
2142
2143 rd_curr++;
2144 if (rd_curr >= vptr->options.numrx)
2145 rd_curr = 0;
Simon Kagstromdfff7142009-11-25 22:10:26 +00002146 works++;
2147 }
Dave Jones2cf71d22009-07-23 18:11:12 -07002148
2149 vptr->rx.curr = rd_curr;
2150
2151 if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2152 velocity_give_many_rx_descs(vptr);
2153
2154 VAR_USED(stats);
2155 return works;
2156}
2157
Simon Kagstromdfff7142009-11-25 22:10:26 +00002158static int velocity_poll(struct napi_struct *napi, int budget)
2159{
2160 struct velocity_info *vptr = container_of(napi,
2161 struct velocity_info, napi);
2162 unsigned int rx_done;
2163 u32 isr_status;
2164
2165 spin_lock(&vptr->lock);
2166 isr_status = mac_read_isr(vptr->mac_regs);
2167
2168 /* Ack the interrupt */
2169 mac_write_isr(vptr->mac_regs, isr_status);
2170 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2171 velocity_error(vptr, isr_status);
2172
2173 /*
2174 * Do rx and tx twice for performance (taken from the VIA
2175 * out-of-tree driver).
2176 */
2177 rx_done = velocity_rx_srv(vptr, isr_status, budget / 2);
2178 velocity_tx_srv(vptr, isr_status);
2179 rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done);
2180 velocity_tx_srv(vptr, isr_status);
2181
2182 spin_unlock(&vptr->lock);
2183
2184 /* If budget not fully consumed, exit the polling mode */
2185 if (rx_done < budget) {
2186 napi_complete(napi);
2187 mac_enable_int(vptr->mac_regs);
2188 }
2189
2190 return rx_done;
2191}
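
/*
 * velocity_poll() follows the standard NAPI contract: consume at most
 * budget rx packets, and only re-enable the device interrupt after
 * napi_complete() when the budget was not exhausted. The skeleton of
 * that contract, with the driver specifics stubbed out by invented
 * demo_* functions:
 */
static int demo_rx_work(int budget);	/* stands in for velocity_rx_srv */
static void demo_napi_complete(void);
static void demo_enable_irq(void);

static int demo_poll(int budget)
{
	int done = demo_rx_work(budget);

	if (done < budget) {		/* ring drained: back to irq mode */
		demo_napi_complete();
		demo_enable_irq();
	}
	return done;			/* never more than budget */
}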
Dave Jones2cf71d22009-07-23 18:11:12 -07002192
2193/**
2194 * velocity_intr - interrupt callback
2195 * @irq: interrupt number
2196 * @dev_instance: interrupting device
2197 *
2198 * Called whenever an interrupt is generated by the velocity
2199 * adapter IRQ line. We may not be the source of the interrupt
2200 * and need to identify initially if we are, and if not exit as
2201 * efficiently as possible.
2202 */
2203static irqreturn_t velocity_intr(int irq, void *dev_instance)
2204{
2205 struct net_device *dev = dev_instance;
2206 struct velocity_info *vptr = netdev_priv(dev);
2207 u32 isr_status;
Dave Jones2cf71d22009-07-23 18:11:12 -07002208
2209 spin_lock(&vptr->lock);
2210 isr_status = mac_read_isr(vptr->mac_regs);
2211
2212 /* Not us ? */
2213 if (isr_status == 0) {
2214 spin_unlock(&vptr->lock);
2215 return IRQ_NONE;
2216 }
2217
Simon Kagstromdfff7142009-11-25 22:10:26 +00002218 if (likely(napi_schedule_prep(&vptr->napi))) {
2219 mac_disable_int(vptr->mac_regs);
2220 __napi_schedule(&vptr->napi);
Dave Jones2cf71d22009-07-23 18:11:12 -07002221 }
2222 spin_unlock(&vptr->lock);
Dave Jones2cf71d22009-07-23 18:11:12 -07002223
Simon Kagstromdfff7142009-11-25 22:10:26 +00002224 return IRQ_HANDLED;
Dave Jones2cf71d22009-07-23 18:11:12 -07002225}
2226
2227/**
2228 * velocity_open - interface activation callback
2229 * @dev: network layer device to open
2230 *
2231 * Called when the network layer brings the interface up. Returns
2232 * a negative POSIX error code on failure, or zero on success.
2233 *
2234 * All the ring allocation and setup is done on open for this
2235 * adapter to minimise memory usage when inactive.
2236 */
2237static int velocity_open(struct net_device *dev)
2238{
2239 struct velocity_info *vptr = netdev_priv(dev);
2240 int ret;
2241
2242 ret = velocity_init_rings(vptr, dev->mtu);
2243 if (ret < 0)
2244 goto out;
2245
2246 /* Ensure chip is running */
2247 pci_set_power_state(vptr->pdev, PCI_D0);
2248
2249 velocity_give_many_rx_descs(vptr);
2250
2251 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2252
Julia Lawall1ede9b52009-11-18 08:24:13 +00002253 ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
Dave Jones2cf71d22009-07-23 18:11:12 -07002254 dev->name, dev);
2255 if (ret < 0) {
2256 /* Power down the chip */
2257 pci_set_power_state(vptr->pdev, PCI_D3hot);
2258 velocity_free_rings(vptr);
2259 goto out;
2260 }
2261
2262 mac_enable_int(vptr->mac_regs);
2263 netif_start_queue(dev);
Simon Kagstromdfff7142009-11-25 22:10:26 +00002264 napi_enable(&vptr->napi);
Dave Jones2cf71d22009-07-23 18:11:12 -07002265 vptr->flags |= VELOCITY_FLAGS_OPENED;
2266out:
2267 return ret;
2268}
2269
2270/**
2271 * velocity_shutdown - shut down the chip
2272 * @vptr: velocity to deactivate
2273 *
2274 * Shuts down the internal operations of the velocity and
2275 * disables interrupts, autopolling, transmit and receive.
2276 */
2277static void velocity_shutdown(struct velocity_info *vptr)
2278{
2279 struct mac_regs __iomem *regs = vptr->mac_regs;
2280 mac_disable_int(regs);
2281 writel(CR0_STOP, &regs->CR0Set);
2282 writew(0xFFFF, &regs->TDCSRClr);
2283 writeb(0xFF, &regs->RDCSRClr);
2284 safe_disable_mii_autopoll(regs);
2285 mac_clear_isr(regs);
2286}
2287
2288/**
2289 * velocity_change_mtu - MTU change callback
2290 * @dev: network device
2291 * @new_mtu: desired MTU
2292 *
2293 * Handle requests from the network layer for an MTU change on
2294 * this interface. Returns zero for success or a negative POSIX
2295 * error code.
2296 */
2297static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2298{
2299 struct velocity_info *vptr = netdev_priv(dev);
2300 int ret = 0;
2301
2302 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
2303 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
2304 vptr->dev->name);
2305 ret = -EINVAL;
2306 goto out_0;
2307 }
2308
2309 if (!netif_running(dev)) {
2310 dev->mtu = new_mtu;
2311 goto out_0;
2312 }
2313
2314 if (dev->mtu != new_mtu) {
2315 struct velocity_info *tmp_vptr;
2316 unsigned long flags;
2317 struct rx_info rx;
2318 struct tx_info tx;
2319
2320 tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2321 if (!tmp_vptr) {
2322 ret = -ENOMEM;
2323 goto out_0;
2324 }
2325
2326 tmp_vptr->dev = dev;
2327 tmp_vptr->pdev = vptr->pdev;
2328 tmp_vptr->options = vptr->options;
2329 tmp_vptr->tx.numq = vptr->tx.numq;
2330
2331 ret = velocity_init_rings(tmp_vptr, new_mtu);
2332 if (ret < 0)
2333 goto out_free_tmp_vptr_1;
2334
2335 spin_lock_irqsave(&vptr->lock, flags);
2336
2337 netif_stop_queue(dev);
2338 velocity_shutdown(vptr);
2339
2340 rx = vptr->rx;
2341 tx = vptr->tx;
2342
2343 vptr->rx = tmp_vptr->rx;
2344 vptr->tx = tmp_vptr->tx;
2345
2346 tmp_vptr->rx = rx;
2347 tmp_vptr->tx = tx;
2348
2349 dev->mtu = new_mtu;
2350
2351 velocity_give_many_rx_descs(vptr);
2352
2353 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2354
2355 mac_enable_int(vptr->mac_regs);
2356 netif_start_queue(dev);
2357
2358 spin_unlock_irqrestore(&vptr->lock, flags);
2359
2360 velocity_free_rings(tmp_vptr);
2361
2362out_free_tmp_vptr_1:
2363 kfree(tmp_vptr);
2364 }
2365out_0:
2366 return ret;
2367}
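
/*
 * The MTU path above never resizes live rings: it builds a second set
 * in a scratch velocity_info, swaps the rx/tx bookkeeping under the
 * lock, and frees the old set after the lock is dropped. The shape of
 * that swap, sketched with a generic pair of pointers (demo names are
 * this example's):
 */
struct demo_rings {
	void *rx;
	void *tx;
};

/* Caller holds the device lock, as velocity_change_mtu() does */
static void demo_swap_rings(struct demo_rings *live,
			    struct demo_rings *scratch)
{
	struct demo_rings tmp = *live;

	*live = *scratch;	/* device now runs on the new rings */
	*scratch = tmp;		/* old rings, freed outside the lock */
}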
2368
2369/**
2370 * velocity_mii_ioctl - MII ioctl handler
2371 * @dev: network device
2372 * @ifr: the ifreq block for the ioctl
2373 * @cmd: the command
2374 *
2375 * Process MII requests made via ioctl from the network layer. These
2376 * are used by tools like kudzu to interrogate the link state of the
2377 * hardware.
2378 */
2379static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2380{
2381 struct velocity_info *vptr = netdev_priv(dev);
2382 struct mac_regs __iomem *regs = vptr->mac_regs;
2383 unsigned long flags;
2384 struct mii_ioctl_data *miidata = if_mii(ifr);
2385 int err;
2386
2387 switch (cmd) {
2388 case SIOCGMIIPHY:
2389 miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2390 break;
2391 case SIOCGMIIREG:
Dave Jones2cf71d22009-07-23 18:11:12 -07002392 if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2393 return -ETIMEDOUT;
2394 break;
2395 case SIOCSMIIREG:
Dave Jones2cf71d22009-07-23 18:11:12 -07002396 spin_lock_irqsave(&vptr->lock, flags);
2397 err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2398 spin_unlock_irqrestore(&vptr->lock, flags);
2399 check_connection_type(vptr->mac_regs);
2400 if (err)
2401 return err;
2402 break;
2403 default:
2404 return -EOPNOTSUPP;
2405 }
2406 return 0;
2407}
2408
2409
2410/**
2411 * velocity_ioctl - ioctl entry point
2412 * @dev: network device
2413 * @rq: interface request ioctl
2414 * @cmd: command code
2415 *
2416 * Called when the user issues an ioctl request to the network
2417 * device in question. The velocity interface supports MII.
2418 */
2419static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2420{
2421 struct velocity_info *vptr = netdev_priv(dev);
2422 int ret;
2423
2424 /* If we are asked for information and the device is power
2425 saving then we need to bring the device back up to talk to it */
2426
2427 if (!netif_running(dev))
2428 pci_set_power_state(vptr->pdev, PCI_D0);
2429
2430 switch (cmd) {
2431 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2432 case SIOCGMIIREG: /* Read MII PHY register. */
2433 case SIOCSMIIREG: /* Write to MII PHY register. */
2434 ret = velocity_mii_ioctl(dev, rq, cmd);
2435 break;
2436
2437 default:
2438 ret = -EOPNOTSUPP;
2439 }
2440 if (!netif_running(dev))
2441 pci_set_power_state(vptr->pdev, PCI_D3hot);
2442
2443
2444 return ret;
2445}
2446
2447/**
2448 * velocity_get_stats - statistics callback
2449 * @dev: network device
2450 *
2451 * Callback from the network layer to allow driver statistics
2452 * to be resynchronized with hardware collected state. In the
2453 * case of the velocity we need to pull the MIB counters from
2454 * the hardware into the counters before letting the network
2455 * layer display them.
2456 */
2457static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2458{
2459 struct velocity_info *vptr = netdev_priv(dev);
2460
2461 /* If the hardware is down, don't touch MII */
2462 if (!netif_running(dev))
2463 return &dev->stats;
2464
2465 spin_lock_irq(&vptr->lock);
2466 velocity_update_hw_mibs(vptr);
2467 spin_unlock_irq(&vptr->lock);
2468
2469 dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2470 dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2471 dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2472
2473// unsigned long rx_dropped; /* no space in linux buffers */
2474 dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2475 /* detailed rx_errors: */
2476// unsigned long rx_length_errors;
2477// unsigned long rx_over_errors; /* receiver ring buff overflow */
2478 dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2479// unsigned long rx_frame_errors; /* recv'd frame alignment error */
2480// unsigned long rx_fifo_errors; /* recv'r fifo overrun */
2481// unsigned long rx_missed_errors; /* receiver missed packet */
2482
2483 /* detailed tx_errors */
2484// unsigned long tx_fifo_errors;
2485
2486 return &dev->stats;
2487}
2488
2489/**
2490 * velocity_close - close adapter callback
2491 * @dev: network device
2492 *
2493 * Callback from the network layer when the velocity is being
2494 * deactivated by the network layer
2495 */
2496static int velocity_close(struct net_device *dev)
2497{
2498 struct velocity_info *vptr = netdev_priv(dev);
2499
Simon Kagstromdfff7142009-11-25 22:10:26 +00002500 napi_disable(&vptr->napi);
Dave Jones2cf71d22009-07-23 18:11:12 -07002501 netif_stop_queue(dev);
2502 velocity_shutdown(vptr);
2503
2504 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2505 velocity_get_ip(vptr);
2506 if (dev->irq != 0)
2507 free_irq(dev->irq, dev);
2508
2509 /* Power down the chip */
2510 pci_set_power_state(vptr->pdev, PCI_D3hot);
2511
2512 velocity_free_rings(vptr);
2513
2514 vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2515 return 0;
2516}
2517
2518/**
2519 * velocity_xmit - transmit packet callback
2520 * @skb: buffer to transmit
2521 * @dev: network device
2522 *
2523 * Called by the network layer to request that a packet be queued to
2524 * the velocity. Returns NETDEV_TX_OK on success.
2525 */
Stephen Hemminger613573252009-08-31 19:50:58 +00002526static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2527 struct net_device *dev)
Dave Jones2cf71d22009-07-23 18:11:12 -07002528{
2529 struct velocity_info *vptr = netdev_priv(dev);
2530 int qnum = 0;
2531 struct tx_desc *td_ptr;
2532 struct velocity_td_info *tdinfo;
2533 unsigned long flags;
2534 int pktlen;
Simon Kagstromc79992f2009-11-25 22:10:43 +00002535 int index, prev;
2536 int i = 0;
Dave Jones2cf71d22009-07-23 18:11:12 -07002537
2538 if (skb_padto(skb, ETH_ZLEN))
2539 goto out;
Dave Jones2cf71d22009-07-23 18:11:12 -07002540
Simon Kagstromc79992f2009-11-25 22:10:43 +00002541 /* The hardware can handle at most 7 memory segments, so merge
2542 * the skb if there are more */
2543 if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2544 kfree_skb(skb);
2545 return NETDEV_TX_OK;
2546 }
2547
2548 pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2549 max_t(unsigned int, skb->len, ETH_ZLEN) :
2550 skb_headlen(skb);
Dave Jones2cf71d22009-07-23 18:11:12 -07002551
2552 spin_lock_irqsave(&vptr->lock, flags);
2553
2554 index = vptr->tx.curr[qnum];
2555 td_ptr = &(vptr->tx.rings[qnum][index]);
2556 tdinfo = &(vptr->tx.infos[qnum][index]);
2557
2558 td_ptr->tdesc1.TCR = TCR0_TIC;
2559 td_ptr->td_buf[0].size &= ~TD_QUEUE;
2560
2561 /*
2562 * Map the linear network buffer into PCI space and
2563 * add it to the transmit ring.
2564 */
2565 tdinfo->skb = skb;
2566 tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
Simon Kagstromc79992f2009-11-25 22:10:43 +00002567 td_ptr->tdesc0.len = cpu_to_le16(pktlen);
Dave Jones2cf71d22009-07-23 18:11:12 -07002568 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2569 td_ptr->td_buf[0].pa_high = 0;
Simon Kagstromc79992f2009-11-25 22:10:43 +00002570 td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2571
2572 /* Handle fragments */
2573 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2574 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2575
2576 tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page,
2577 frag->page_offset, frag->size,
2578 PCI_DMA_TODEVICE);
2579
2580 td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2581 td_ptr->td_buf[i + 1].pa_high = 0;
2582 td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
2583 }
2584 tdinfo->nskb_dma = i + 1;
Dave Jones2cf71d22009-07-23 18:11:12 -07002585
2586 td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2587
2588 if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
2589 td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2590 td_ptr->tdesc1.TCR |= TCR0_VETAG;
2591 }
2592
2593 /*
2594 * Handle hardware checksum
2595 */
2596 	if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM) &&
2597 	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
2598 const struct iphdr *ip = ip_hdr(skb);
2599 if (ip->protocol == IPPROTO_TCP)
2600 td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2601 else if (ip->protocol == IPPROTO_UDP)
2602 td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2603 td_ptr->tdesc1.TCR |= TCR0_IPCK;
2604 }
Dave Jones2cf71d22009-07-23 18:11:12 -07002605
Simon Kagstromc79992f2009-11-25 22:10:43 +00002606 prev = index - 1;
2607 if (prev < 0)
2608 prev = vptr->options.numtx - 1;
2609 td_ptr->tdesc0.len |= OWNED_BY_NIC;
2610 vptr->tx.used[qnum]++;
2611 vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
Dave Jones2cf71d22009-07-23 18:11:12 -07002612
Simon Kagstromc79992f2009-11-25 22:10:43 +00002613 if (AVAIL_TD(vptr, qnum) < 1)
2614 netif_stop_queue(dev);
Dave Jones2cf71d22009-07-23 18:11:12 -07002615
Simon Kagstromc79992f2009-11-25 22:10:43 +00002616 td_ptr = &(vptr->tx.rings[qnum][prev]);
2617 td_ptr->td_buf[0].size |= TD_QUEUE;
2618 mac_tx_queue_wake(vptr->mac_regs, qnum);
Dave Jones2cf71d22009-07-23 18:11:12 -07002619
Dave Jones2cf71d22009-07-23 18:11:12 -07002620 dev->trans_start = jiffies;
2621 spin_unlock_irqrestore(&vptr->lock, flags);
2622out:
2623 return NETDEV_TX_OK;
2624}
2625
2626
Stephen Hemminger39a11bd2008-11-19 22:19:33 -08002627static const struct net_device_ops velocity_netdev_ops = {
2628 .ndo_open = velocity_open,
2629 .ndo_stop = velocity_close,
Stephen Hemminger00829822008-11-20 20:14:53 -08002630 .ndo_start_xmit = velocity_xmit,
Stephen Hemminger39a11bd2008-11-19 22:19:33 -08002631 .ndo_get_stats = velocity_get_stats,
2632 .ndo_validate_addr = eth_validate_addr,
Stephen Hemmingerfe96aaa2009-01-09 11:13:14 +00002633 .ndo_set_mac_address = eth_mac_addr,
Stephen Hemminger39a11bd2008-11-19 22:19:33 -08002634 .ndo_set_multicast_list = velocity_set_multi,
2635 .ndo_change_mtu = velocity_change_mtu,
2636 .ndo_do_ioctl = velocity_ioctl,
2637 .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
2638 .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
2639 .ndo_vlan_rx_register = velocity_vlan_rx_register,
2640};
2641
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642/**
Dave Jones2cf71d22009-07-23 18:11:12 -07002643 * velocity_init_info - init private data
2644 * @pdev: PCI device
2645 * @vptr: Velocity info
2646 * @info: Board type
2647 *
2648 * Set up the initial velocity_info struct for the device that has been
2649 * discovered.
2650 */
2651static void __devinit velocity_init_info(struct pci_dev *pdev,
2652 struct velocity_info *vptr,
2653 const struct velocity_info_tbl *info)
2654{
2655 memset(vptr, 0, sizeof(struct velocity_info));
2656
2657 vptr->pdev = pdev;
2658 vptr->chip_id = info->chip_id;
2659 vptr->tx.numq = info->txqueue;
2660 vptr->multicast_limit = MCAM_SIZE;
2661 spin_lock_init(&vptr->lock);
Dave Jones2cf71d22009-07-23 18:11:12 -07002662}
2663
2664/**
2665 * velocity_get_pci_info - retrieve PCI info for device
2666 * @vptr: velocity device
2667 * @pdev: PCI device it matches
2668 *
2669 * Retrieve the PCI configuration space data that interests us from
2670 * the kernel PCI layer
2671 */
2672static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
2673{
2674 vptr->rev_id = pdev->revision;
2675
2676 pci_set_master(pdev);
2677
2678 vptr->ioaddr = pci_resource_start(pdev, 0);
2679 vptr->memaddr = pci_resource_start(pdev, 1);
2680
2681 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2682 dev_err(&pdev->dev,
2683 "region #0 is not an I/O resource, aborting.\n");
2684 return -EINVAL;
2685 }
2686
2687 if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2688 dev_err(&pdev->dev,
2689 "region #1 is an I/O resource, aborting.\n");
2690 return -EINVAL;
2691 }
2692
2693 if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2694 dev_err(&pdev->dev, "region #1 is too small.\n");
2695 return -EINVAL;
2696 }
2697 vptr->pdev = pdev;
2698
2699 return 0;
2700}
2701
2702/**
2703 * velocity_print_info - per driver data
2704 * @vptr: velocity
2705 *
2706 * Print per driver data as the kernel driver finds Velocity
2707 * hardware
2708 */
2709static void __devinit velocity_print_info(struct velocity_info *vptr)
2710{
2711 struct net_device *dev = vptr->dev;
2712
2713 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2714 printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
2715 dev->name,
2716 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
2717 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
2718}
2719
2720static u32 velocity_get_link(struct net_device *dev)
2721{
2722 struct velocity_info *vptr = netdev_priv(dev);
2723 struct mac_regs __iomem *regs = vptr->mac_regs;
2724 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2725}
2726
2727
2728/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 * velocity_found1 - set up discovered velocity card
2730 * @pdev: PCI device
2731 * @ent: PCI device table entry that matched
2732 *
2733 * Configure a discovered adapter from scratch. Return a negative
2734 * errno code on failure paths.
2735 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
2737{
2738 static int first = 1;
2739 struct net_device *dev;
2740 int i;
Sven Hartge07b5f6a2008-10-23 13:03:44 +00002741 const char *drv_string;
Jeff Garzikcabb7662006-06-27 09:25:28 -04002742 const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743 struct velocity_info *vptr;
Dave Jonesc4067402009-07-20 17:35:21 +00002744 struct mac_regs __iomem *regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745 int ret = -ENOMEM;
2746
Jeff Garzike54f4892006-06-27 09:20:08 -04002747 /* FIXME: this driver, like almost all other ethernet drivers,
2748 * can support more than MAX_UNITS.
2749 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750 if (velocity_nics >= MAX_UNITS) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002751 dev_notice(&pdev->dev, "already found %d NICs.\n",
Jeff Garzike54f4892006-06-27 09:20:08 -04002752 velocity_nics);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 return -ENODEV;
2754 }
2755
2756 dev = alloc_etherdev(sizeof(struct velocity_info));
Jeff Garzike54f4892006-06-27 09:20:08 -04002757 if (!dev) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04002758 dev_err(&pdev->dev, "allocate net device failed.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759 goto out;
2760 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002761
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762 /* Chain it all together */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002763
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 SET_NETDEV_DEV(dev, &pdev->dev);
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04002765 vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766
2767
2768 if (first) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002769 printk(KERN_INFO "%s Ver. %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2771 printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2772 printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
2773 first = 0;
2774 }
2775
2776 velocity_init_info(pdev, vptr, info);
2777
2778 vptr->dev = dev;
2779
2780 dev->irq = pdev->irq;
2781
2782 ret = pci_enable_device(pdev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002783 if (ret < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 goto err_free_dev;
2785
2786 ret = velocity_get_pci_info(vptr, pdev);
2787 if (ret < 0) {
Jeff Garzike54f4892006-06-27 09:20:08 -04002788 /* error message already printed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789 goto err_disable;
2790 }
2791
2792 ret = pci_request_regions(pdev, VELOCITY_NAME);
2793 if (ret < 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04002794 dev_err(&pdev->dev, "No PCI resources.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 goto err_disable;
2796 }
2797
Jeff Garzikcabb7662006-06-27 09:25:28 -04002798 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 if (regs == NULL) {
2800 ret = -EIO;
2801 goto err_release_res;
2802 }
2803
2804 vptr->mac_regs = regs;
2805
2806 mac_wol_reset(regs);
2807
2808 dev->base_addr = vptr->ioaddr;
2809
2810 for (i = 0; i < 6; i++)
2811 dev->dev_addr[i] = readb(&regs->PAR[i]);
2812
2813
Sven Hartge07b5f6a2008-10-23 13:03:44 +00002814 drv_string = dev_driver_string(&pdev->dev);
2815
2816 velocity_get_options(&vptr->options, velocity_nics, drv_string);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002818 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819 * Mask out the options that cannot be set on the chip
2820 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002821
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822 vptr->options.flags &= info->flags;
2823
2824 /*
2825 * Enable the chip-specific capabilities
2826 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002827
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828 vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2829
2830 vptr->wol_opts = vptr->options.wol_opts;
2831 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2832
2833 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2834
2835 dev->irq = pdev->irq;
Stephen Hemminger39a11bd2008-11-19 22:19:33 -08002836 dev->netdev_ops = &velocity_netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 dev->ethtool_ops = &velocity_ethtool_ops;
Simon Kagstromdfff7142009-11-25 22:10:26 +00002838 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
Stephen Hemminger501e4d22007-08-24 13:56:49 -07002839
Francois Romieud4f73c82008-04-24 23:32:33 +02002840 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2841 NETIF_F_HW_VLAN_RX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842
Stephen Hemminger501e4d22007-08-24 13:56:49 -07002843 if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
John W. Linville9f3f46b2005-12-09 10:36:09 -05002844 dev->features |= NETIF_F_IP_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845
2846 ret = register_netdev(dev);
2847 if (ret < 0)
2848 goto err_iounmap;
2849
Séguier Régisd3b238a2009-06-16 11:25:49 +00002850 if (!velocity_get_link(dev)) {
Francois Romieu8a22ddd2006-06-23 00:47:06 +02002851 netif_carrier_off(dev);
Séguier Régisd3b238a2009-06-16 11:25:49 +00002852 vptr->mii_status |= VELOCITY_LINK_FAIL;
2853 }
Francois Romieu8a22ddd2006-06-23 00:47:06 +02002854
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 velocity_print_info(vptr);
2856 pci_set_drvdata(pdev, dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002857
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 /* and leave the chip powered down */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002859
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860 pci_set_power_state(pdev, PCI_D3hot);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861 velocity_nics++;
2862out:
2863 return ret;
2864
2865err_iounmap:
2866 iounmap(regs);
2867err_release_res:
2868 pci_release_regions(pdev);
2869err_disable:
2870 pci_disable_device(pdev);
2871err_free_dev:
2872 free_netdev(dev);
2873 goto out;
2874}
2875
Dave Jones2cf71d22009-07-23 18:11:12 -07002876
2877#ifdef CONFIG_PM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878/**
Dave Jones2cf71d22009-07-23 18:11:12 -07002879 * wol_calc_crc - WOL CRC
2880 * @pattern: data pattern
2881 * @mask_pattern: mask
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882 *
Dave Jones2cf71d22009-07-23 18:11:12 -07002883 * Compute the wake on LAN CRC hashes for the packet header
2884 * we are interested in.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885 */
Dave Jones2cf71d22009-07-23 18:11:12 -07002886static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887{
Dave Jones2cf71d22009-07-23 18:11:12 -07002888 u16 crc = 0xFFFF;
2889 u8 mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 int i, j;
2891
Dave Jones2cf71d22009-07-23 18:11:12 -07002892 for (i = 0; i < size; i++) {
2893 mask = mask_pattern[i];
2894
2895 /* Skip this loop if the mask equals to zero */
2896 if (mask == 0x00)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898
Dave Jones2cf71d22009-07-23 18:11:12 -07002899 for (j = 0; j < 8; j++) {
2900 if ((mask & 0x01) == 0) {
2901 mask >>= 1;
2902 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903 }
Dave Jones2cf71d22009-07-23 18:11:12 -07002904 mask >>= 1;
2905 crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906 }
2907 }
Dave Jones2cf71d22009-07-23 18:11:12 -07002908 /* Finally, invert the result once to get the correct data */
2909 crc = ~crc;
2910 return bitrev32(crc) >> 16;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911}
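
/*
 * Note the mask granularity above: mask byte i covers pattern bytes
 * i * 8 .. i * 8 + 7, one bit per byte, and only bytes whose bit is
 * set are folded into the CRC. For reference, a bitwise form of the
 * byte update that the kernel's crc_ccitt() implements via a lookup
 * table (reflected polynomial 0x8408):
 */
static unsigned short demo_crc_ccitt_byte(unsigned short crc,
					  unsigned char c)
{
	int k;

	crc ^= c;
	for (k = 0; k < 8; k++)
		crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	return crc;
}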
2912
2913/**
Dave Jones2cf71d22009-07-23 18:11:12 -07002914 * velocity_set_wol - set up for wake on lan
2915 * @vptr: velocity to set WOL status on
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 *
Dave Jones2cf71d22009-07-23 18:11:12 -07002917 * Set a card up for wake on lan either by unicast or by
2918 * ARP packet.
2919 *
2920 * FIXME: check static buffer is safe here
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 */
Dave Jones2cf71d22009-07-23 18:11:12 -07002922static int velocity_set_wol(struct velocity_info *vptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923{
Dave Jonesc4067402009-07-20 17:35:21 +00002924 struct mac_regs __iomem *regs = vptr->mac_regs;
Dave Jones2cf71d22009-07-23 18:11:12 -07002925 static u8 buf[256];
2926 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927
Dave Jones2cf71d22009-07-23 18:11:12 -07002928 static u32 mask_pattern[2][4] = {
2929 {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
2930 {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */
2931 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932
Dave Jones2cf71d22009-07-23 18:11:12 -07002933 writew(0xFFFF, &regs->WOLCRClr);
2934 writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
2935 writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936
Dave Jones2cf71d22009-07-23 18:11:12 -07002937 /*
2938 if (vptr->wol_opts & VELOCITY_WOL_PHY)
2939 writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
2940 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941
Dave Jones2cf71d22009-07-23 18:11:12 -07002942 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
2943 writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002944
Dave Jones2cf71d22009-07-23 18:11:12 -07002945 if (vptr->wol_opts & VELOCITY_WOL_ARP) {
2946 struct arp_packet *arp = (struct arp_packet *) buf;
2947 u16 crc;
2948 memset(buf, 0, sizeof(struct arp_packet) + 7);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002949
Dave Jones2cf71d22009-07-23 18:11:12 -07002950 for (i = 0; i < 4; i++)
2951 writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952
Dave Jones2cf71d22009-07-23 18:11:12 -07002953 arp->type = htons(ETH_P_ARP);
2954 arp->ar_op = htons(1);
2955
2956 memcpy(arp->ar_tip, vptr->ip_addr, 4);
2957
2958 crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
2959 (u8 *) & mask_pattern[0][0]);
2960
2961 writew(crc, &regs->PatternCRC[0]);
2962 writew(WOLCR_ARP_EN, &regs->WOLCRSet);
2963 }
2964
2965 BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
2966 BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
2967
2968 writew(0x0FFF, &regs->WOLSRClr);
2969
2970 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2971 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2972 MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
2973
2974 MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
2975 }
2976
2977 if (vptr->mii_status & VELOCITY_SPEED_1000)
2978 MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
2979
2980 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2981
2982 {
2983 u8 GCR;
2984 GCR = readb(&regs->CHIPGCR);
2985 GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
2986 writeb(GCR, &regs->CHIPGCR);
2987 }
2988
2989 BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
2990 /* Turn on SWPTAG just before entering power mode */
2991 BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
2992 /* Go to bed ..... */
2993 BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
2994
Linus Torvalds1da177e2005-04-16 15:20:36 -07002995 return 0;
2996}
2997
2998/**
Dave Jones2cf71d22009-07-23 18:11:12 -07002999 * velocity_save_context - save registers
3000 * @vptr: velocity
3001 * @context: buffer for stored context
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002 *
Dave Jones2cf71d22009-07-23 18:11:12 -07003003 * Retrieve the current configuration from the velocity hardware
3004 * and stash it in the context structure, for use by the context
3005 * restore functions. This allows us to save things we need across
3006 * power down states
Linus Torvalds1da177e2005-04-16 15:20:36 -07003007 */
Dave Jones2cf71d22009-07-23 18:11:12 -07003008static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009{
Dave Jones2cf71d22009-07-23 18:11:12 -07003010 struct mac_regs __iomem *regs = vptr->mac_regs;
3011 u16 i;
3012 u8 __iomem *ptr = (u8 __iomem *)regs;
3013
3014 for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3015 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3016
3017 for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3018 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3019
3020 for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3021 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3022
3023}
3024
3025static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
3026{
3027 struct net_device *dev = pci_get_drvdata(pdev);
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003028 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029 unsigned long flags;
Francois Romieu580a6902008-07-11 00:03:44 +02003030
Dave Jones2cf71d22009-07-23 18:11:12 -07003031 if (!netif_running(vptr->dev))
3032 return 0;
Francois Romieu580a6902008-07-11 00:03:44 +02003033
Dave Jones2cf71d22009-07-23 18:11:12 -07003034 netif_device_detach(vptr->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035
3036 spin_lock_irqsave(&vptr->lock, flags);
Dave Jones2cf71d22009-07-23 18:11:12 -07003037 pci_save_state(pdev);
3038#ifdef ETHTOOL_GWOL
3039 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3040 velocity_get_ip(vptr);
3041 velocity_save_context(vptr, &vptr->context);
3042 velocity_shutdown(vptr);
3043 velocity_set_wol(vptr);
3044 pci_enable_wake(pdev, PCI_D3hot, 1);
3045 pci_set_power_state(pdev, PCI_D3hot);
3046 } else {
3047 velocity_save_context(vptr, &vptr->context);
3048 velocity_shutdown(vptr);
3049 pci_disable_device(pdev);
3050 pci_set_power_state(pdev, pci_choose_state(pdev, state));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051 }
Dave Jones2cf71d22009-07-23 18:11:12 -07003052#else
3053 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3054#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055 spin_unlock_irqrestore(&vptr->lock, flags);
Dave Jones2cf71d22009-07-23 18:11:12 -07003056 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003057}
3058
3059/**
Dave Jones2cf71d22009-07-23 18:11:12 -07003060 * velocity_restore_context - restore registers
3061 * @vptr: velocity
3062 * @context: buffer for stored context
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063 *
Dave Jones2cf71d22009-07-23 18:11:12 -07003064 * Reload the register configuration from the velocity context
3065 * created by velocity_save_context.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003066 */
Dave Jones2cf71d22009-07-23 18:11:12 -07003067static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068{
Dave Jones2cf71d22009-07-23 18:11:12 -07003069 struct mac_regs __iomem *regs = vptr->mac_regs;
3070 int i;
3071 u8 __iomem *ptr = (u8 __iomem *)regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003072
Dave Jones2cf71d22009-07-23 18:11:12 -07003073 for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3074 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003075
Dave Jones2cf71d22009-07-23 18:11:12 -07003076 /* Just skip cr0 */
3077 for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3078 /* Clear */
3079 writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3080 /* Set */
3081 writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003082 }
3083
Dave Jones2cf71d22009-07-23 18:11:12 -07003084 for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3085 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3086
3087 for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3088 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3089
3090 for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3091 writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3092}
3093
3094static int velocity_resume(struct pci_dev *pdev)
3095{
3096 struct net_device *dev = pci_get_drvdata(pdev);
3097 struct velocity_info *vptr = netdev_priv(dev);
3098 unsigned long flags;
3099 int i;
3100
3101 if (!netif_running(vptr->dev))
3102 return 0;
3103
3104 pci_set_power_state(pdev, PCI_D0);
3105 pci_enable_wake(pdev, 0, 0);
3106 pci_restore_state(pdev);
3107
3108 mac_wol_reset(vptr->mac_regs);
3109
3110 spin_lock_irqsave(&vptr->lock, flags);
3111 velocity_restore_context(vptr, &vptr->context);
3112 velocity_init_registers(vptr, VELOCITY_INIT_WOL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003113 mac_disable_int(vptr->mac_regs);
3114
Dave Jones2cf71d22009-07-23 18:11:12 -07003115 velocity_tx_srv(vptr, 0);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003116
Dave Jones2cf71d22009-07-23 18:11:12 -07003117 for (i = 0; i < vptr->tx.numq; i++) {
3118 if (vptr->tx.used[i])
3119 mac_tx_queue_wake(vptr->mac_regs, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120 }
Dave Jones2cf71d22009-07-23 18:11:12 -07003121
Linus Torvalds1da177e2005-04-16 15:20:36 -07003122 mac_enable_int(vptr->mac_regs);
Dave Jones2cf71d22009-07-23 18:11:12 -07003123 spin_unlock_irqrestore(&vptr->lock, flags);
3124 netif_device_attach(vptr->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125
Dave Jones2cf71d22009-07-23 18:11:12 -07003126 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003127}
Dave Jones2cf71d22009-07-23 18:11:12 -07003128#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129
3130/*
3131 * Definition for our device driver. The PCI layer interface
3132 * uses this to handle all our card discover and plugging
3133 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134static struct pci_driver velocity_driver = {
3135 .name = VELOCITY_NAME,
3136 .id_table = velocity_id_table,
3137 .probe = velocity_found1,
3138 .remove = __devexit_p(velocity_remove1),
3139#ifdef CONFIG_PM
3140 .suspend = velocity_suspend,
3141 .resume = velocity_resume,
3142#endif
3143};
3144
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145
3146/**
3147 * velocity_ethtool_up - pre hook for ethtool
3148 * @dev: network device
3149 *
3150 * Called before an ethtool operation. We need to make sure the
3151 * chip is out of D3 state before we poke at it.
3152 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153static int velocity_ethtool_up(struct net_device *dev)
3154{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003155 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156 if (!netif_running(dev))
3157 pci_set_power_state(vptr->pdev, PCI_D0);
3158 return 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003159}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003160
3161/**
3162 * velocity_ethtool_down - post hook for ethtool
3163 * @dev: network device
3164 *
3165 * Called after an ethtool operation. Restore the chip back to D3
3166 * state if it isn't running.
3167 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168static void velocity_ethtool_down(struct net_device *dev)
3169{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003170 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003171 if (!netif_running(dev))
3172 pci_set_power_state(vptr->pdev, PCI_D3hot);
3173}
3174
3175static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3176{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003177 struct velocity_info *vptr = netdev_priv(dev);
Dave Jonesc4067402009-07-20 17:35:21 +00003178 struct mac_regs __iomem *regs = vptr->mac_regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179 u32 status;
3180 status = check_connection_type(vptr->mac_regs);
3181
Jay Cliburn59b693f2006-07-20 23:23:57 +02003182 cmd->supported = SUPPORTED_TP |
3183 SUPPORTED_Autoneg |
3184 SUPPORTED_10baseT_Half |
3185 SUPPORTED_10baseT_Full |
3186 SUPPORTED_100baseT_Half |
3187 SUPPORTED_100baseT_Full |
3188 SUPPORTED_1000baseT_Half |
3189 SUPPORTED_1000baseT_Full;
3190 if (status & VELOCITY_SPEED_1000)
3191 cmd->speed = SPEED_1000;
3192 else if (status & VELOCITY_SPEED_100)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193 cmd->speed = SPEED_100;
3194 else
3195 cmd->speed = SPEED_10;
3196 cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3197 cmd->port = PORT_TP;
3198 cmd->transceiver = XCVR_INTERNAL;
3199 cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
3200
3201 if (status & VELOCITY_DUPLEX_FULL)
3202 cmd->duplex = DUPLEX_FULL;
3203 else
3204 cmd->duplex = DUPLEX_HALF;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003205
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206 return 0;
3207}
3208
3209static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3210{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003211 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003212 u32 curr_status;
3213 u32 new_status = 0;
3214 int ret = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003215
Linus Torvalds1da177e2005-04-16 15:20:36 -07003216 curr_status = check_connection_type(vptr->mac_regs);
3217 curr_status &= (~VELOCITY_LINK_FAIL);
3218
3219 new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3220 new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3221 new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3222 new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
3223
3224 if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE)))
3225 ret = -EINVAL;
3226 else
3227 velocity_set_media_mode(vptr, new_status);
3228
3229 return ret;
3230}
3231
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3233{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003234 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235 strcpy(info->driver, VELOCITY_NAME);
3236 strcpy(info->version, VELOCITY_VERSION);
3237 strcpy(info->bus_info, pci_name(vptr->pdev));
3238}
3239
3240static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3241{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003242 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243 wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3244 wol->wolopts |= WAKE_MAGIC;
3245 /*
3246 if (vptr->wol_opts & VELOCITY_WOL_PHY)
3247 wol.wolopts|=WAKE_PHY;
3248 */
3249 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3250 wol->wolopts |= WAKE_UCAST;
3251 if (vptr->wol_opts & VELOCITY_WOL_ARP)
3252 wol->wolopts |= WAKE_ARP;
3253 memcpy(&wol->sopass, vptr->wol_passwd, 6);
3254}
3255
3256static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3257{
Jeff Garzik8ab6f3f2006-06-27 08:56:23 -04003258 struct velocity_info *vptr = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003259
3260 if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3261 return -EFAULT;
3262 vptr->wol_opts = VELOCITY_WOL_MAGIC;
3263
3264 /*
3265 if (wol.wolopts & WAKE_PHY) {
3266 vptr->wol_opts|=VELOCITY_WOL_PHY;
3267 vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
3268 }
3269 */
3270
3271 if (wol->wolopts & WAKE_MAGIC) {
3272 vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3273 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3274 }
3275 if (wol->wolopts & WAKE_UCAST) {
3276 vptr->wol_opts |= VELOCITY_WOL_UCAST;
3277 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3278 }
3279 if (wol->wolopts & WAKE_ARP) {
3280 vptr->wol_opts |= VELOCITY_WOL_ARP;
3281 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3282 }
3283 memcpy(vptr->wol_passwd, wol->sopass, 6);
3284 return 0;
3285}
3286
3287static u32 velocity_get_msglevel(struct net_device *dev)
3288{
3289 return msglevel;
3290}
3291
3292static void velocity_set_msglevel(struct net_device *dev, u32 value)
3293{
3294 msglevel = value;
3295}
3296
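/*
 * The receive and transmit queue timers are held in the hardware
 * encoding: a 6-bit count in bits 5:0 and a multiplier selector in
 * bits 7:6 that scales the count by 1, 4, 16 or 64.
 * get_pending_timer_val() converts this back to microseconds, e.g.
 * 0x59 -> multiplier bits 01 (x4), count 25 -> 100 usec.
 */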
Simon Kagstrom6dfc4b92009-11-25 22:10:12 +00003297static int get_pending_timer_val(int val)
3298{
3299 int mult_bits = val >> 6;
3300 int mult = 1;
3301
3302	switch (mult_bits) {
3304 case 1:
3305 mult = 4; break;
3306 case 2:
3307 mult = 16; break;
3308 case 3:
3309 mult = 64; break;
3310 case 0:
3311 default:
3312 break;
3313 }
3314
3315 return (val & 0x3f) * mult;
3316}
3317
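/*
 * set_pending_timer_val() performs the reverse mapping: it picks the
 * smallest multiplier able to hold the requested delay and stores the
 * scaled-down count, so the delay is quantised down to the chosen
 * step, e.g. 300 usec -> x16 multiplier -> 18 * 16 = 288 usec.
 */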
3318static void set_pending_timer_val(int *val, u32 us)
3319{
3320 u8 mult = 0;
3321 u8 shift = 0;
3322
3323 if (us >= 0x3f) {
3324		mult = 1; /* multiply by 4 */
3325 shift = 2;
3326 }
3327 if (us >= 0x3f * 4) {
3328		mult = 2; /* multiply by 16 */
3329 shift = 4;
3330 }
3331 if (us >= 0x3f * 16) {
3332		mult = 3; /* multiply by 64 */
3333 shift = 6;
3334 }
3335
3336 *val = (mult << 6) | ((us >> shift) & 0x3f);
3337}
3338
3339
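/**
 * velocity_get_coalesce - get interrupt coalescing parameters
 * @dev: network interface device
 * @ecmd: coalescing parameters to fill in
 *
 * Report the current interrupt suppression frame counts and the
 * queue timer delays, converted from the hardware encoding back
 * into microseconds.
 */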
3340static int velocity_get_coalesce(struct net_device *dev,
3341 struct ethtool_coalesce *ecmd)
3342{
3343 struct velocity_info *vptr = netdev_priv(dev);
3344
3345 ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3346 ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3347
3348 ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3349 ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3350
3351 return 0;
3352}
3353
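/**
 * velocity_set_coalesce - set interrupt coalescing parameters
 * @dev: network interface device
 * @ecmd: requested coalescing parameters
 *
 * Validate the requested delays and frame counts against the width
 * of the hardware fields, store them and reprogram the adapter,
 * with interrupts disabled around the update.
 */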
3354static int velocity_set_coalesce(struct net_device *dev,
3355 struct ethtool_coalesce *ecmd)
3356{
3357 struct velocity_info *vptr = netdev_priv(dev);
3358 int max_us = 0x3f * 64;
3359
3360	/* The timer count is 6 bits wide and the largest multiplier is 64 */
3361 if (ecmd->tx_coalesce_usecs > max_us)
3362 return -EINVAL;
3363 if (ecmd->rx_coalesce_usecs > max_us)
3364 return -EINVAL;
3365
3366 if (ecmd->tx_max_coalesced_frames > 0xff)
3367 return -EINVAL;
3368 if (ecmd->rx_max_coalesced_frames > 0xff)
3369 return -EINVAL;
3370
3371 vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3372 vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3373
3374 set_pending_timer_val(&vptr->options.rxqueue_timer,
3375 ecmd->rx_coalesce_usecs);
3376 set_pending_timer_val(&vptr->options.txqueue_timer,
3377 ecmd->tx_coalesce_usecs);
3378
3379 /* Setup the interrupt suppression and queue timers */
3380 mac_disable_int(vptr->mac_regs);
3381 setup_adaptive_interrupts(vptr);
3382 setup_queue_timers(vptr);
3383
3384 mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3385 mac_clear_isr(vptr->mac_regs);
3386 mac_enable_int(vptr->mac_regs);
3387
3388 return 0;
3389}
3390
Jeff Garzik7282d492006-09-13 14:30:00 -04003391static const struct ethtool_ops velocity_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392 .get_settings = velocity_get_settings,
3393 .set_settings = velocity_set_settings,
3394 .get_drvinfo = velocity_get_drvinfo,
3395 .get_wol = velocity_ethtool_get_wol,
3396 .set_wol = velocity_ethtool_set_wol,
3397 .get_msglevel = velocity_get_msglevel,
3398 .set_msglevel = velocity_set_msglevel,
Simon Kagstromc79992f2009-11-25 22:10:43 +00003399 .set_sg = ethtool_op_set_sg,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400 .get_link = velocity_get_link,
Simon Kagstrom6dfc4b92009-11-25 22:10:12 +00003401 .get_coalesce = velocity_get_coalesce,
3402 .set_coalesce = velocity_set_coalesce,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403 .begin = velocity_ethtool_up,
3404 .complete = velocity_ethtool_down
3405};
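/*
 * These handlers back the standard ethtool interface. For example
 * (assuming the adapter is eth0):
 *
 *	ethtool -C eth0 rx-usecs 100 rx-frames 16
 *
 * reaches velocity_set_coalesce() with rx_coalesce_usecs = 100 and
 * rx_max_coalesced_frames = 16.
 */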
3406
Linus Torvalds1da177e2005-04-16 15:20:36 -07003407#ifdef CONFIG_PM
Randy Dunlapce9f7fe2006-12-18 21:21:10 -08003408#ifdef CONFIG_INET
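/*
 * Watch for IPv4 address changes on velocity interfaces so that
 * velocity_get_ip() can keep a copy of the current address, which
 * the wake-on-ARP support relies on while the device is suspended.
 */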
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3410{
3411 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
Denis V. Luneva3374992008-02-28 20:44:27 -08003412 struct net_device *dev = ifa->ifa_dev->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413
Ben Hutchings516b4df2009-10-28 04:01:46 -07003414 if (dev_net(dev) == &init_net &&
3415 dev->netdev_ops == &velocity_netdev_ops)
3416 velocity_get_ip(netdev_priv(dev));
Denis V. Luneva3374992008-02-28 20:44:27 -08003417
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418 return NOTIFY_DONE;
3419}
Dave Jones2cf71d22009-07-23 18:11:12 -07003420#endif /* CONFIG_INET */
3421#endif /* CONFIG_PM */
Randy Dunlapce9f7fe2006-12-18 21:21:10 -08003422
Dave Jones2cf71d22009-07-23 18:11:12 -07003423#if defined(CONFIG_PM) && defined(CONFIG_INET)
3424static struct notifier_block velocity_inetaddr_notifier = {
3425 .notifier_call = velocity_netdev_event,
3426};
3427
3428static void velocity_register_notifier(void)
3429{
3430 register_inetaddr_notifier(&velocity_inetaddr_notifier);
3431}
3432
3433static void velocity_unregister_notifier(void)
3434{
3435 unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3436}
3437
3438#else
3439
3440#define velocity_register_notifier() do {} while (0)
3441#define velocity_unregister_notifier() do {} while (0)
3442
3443#endif /* defined(CONFIG_PM) && defined(CONFIG_INET) */
3444
3445/**
3446 * velocity_init_module - load time function
3447 *
3448 * Called when the velocity module is loaded. The PCI driver
3449 * is registered with the PCI layer, which will in turn call
3450 * the probe function for each velocity adapter installed
3451 * in the system.
3452 */
3453static int __init velocity_init_module(void)
3454{
3455 int ret;
3456
3457 velocity_register_notifier();
3458 ret = pci_register_driver(&velocity_driver);
3459 if (ret < 0)
3460 velocity_unregister_notifier();
3461 return ret;
3462}
3463
3464/**
3465 * velocity_cleanup_module - module unload
3466 *
3467 * When the velocity module is unloaded this function is called.
3468 * It will clean up the notifiers and then unregister the PCI
3469 * driver interface for this hardware. This in turn cleans up
3470 * all discovered interfaces before returning from the function.
3471 */
3472static void __exit velocity_cleanup_module(void)
3473{
3474 velocity_unregister_notifier();
3475 pci_unregister_driver(&velocity_driver);
3476}
3477
3478module_init(velocity_init_module);
3479module_exit(velocity_cleanup_module);