/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, ssid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 1, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1, 1),	/* T302E */
	CH_DEVICE(0x22, 1, 2),	/* T310E */
	CH_DEVICE(0x23, 1, 3),	/* T320X */
	CH_DEVICE(0x24, 1, 1),	/* T302X */
	CH_DEVICE(0x25, 1, 3),	/* T320E */
	CH_DEVICE(0x26, 1, 2),	/* T310X */
	CH_DEVICE(0x30, 1, 2),	/* T3B10 */
	CH_DEVICE(0x31, 1, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1, 1),	/* T3B02 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose link settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);
		link_report(dev);
	}
}

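/*
 * Propagate a net device's address filtering state (promiscuity and the
 * multicast list) to its port's MAC.
 */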
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s (queue %d)", d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

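/*
 * Request the MSI-X data-path interrupts, one per SGE queue set.  Vector 0
 * is reserved for slow-path events, so queue set i uses vector i + 1.  On
 * failure, all vectors acquired so far are released.
 */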
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}

/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices.  We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues.  Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
	int i, j, dummy_idx = 0;
	struct net_device *nd;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		for (j = 0; j < pi->nqsets - 1; j++) {
			if (!adap->dummy_netdev[dummy_idx]) {
				nd = alloc_netdev(0, "", ether_setup);
				if (!nd)
					goto free_all;

				nd->priv = adap;
				nd->weight = 64;
				set_bit(__LINK_STATE_START, &nd->state);
				adap->dummy_netdev[dummy_idx] = nd;
			}
			strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
			dummy_idx++;
		}
	}
	return 0;

free_all:
	while (--dummy_idx >= 0) {
		free_netdev(adap->dummy_netdev[dummy_idx]);
		adap->dummy_netdev[dummy_idx] = NULL;
	}
	return -ENOMEM;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers
 * of both netdevices representing interfaces and the dummy ones for the
 * extra queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;
	struct net_device *dev;

	for_each_port(adap, i) {
		dev = adap->port[i];
		while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
			msleep(1);
	}

	for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
		dev = adap->dummy_netdev[i];
		if (dev)
			while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
				msleep(1);
	}
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq,
				j == 0 ? dev :
					 adap->dummy_netdev[dummy_dev_idx++]);
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

static ssize_t attr_show(struct device *d, struct device_attribute *attr,
			 char *buf,
			 ssize_t (*format)(struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format)(to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d, struct device_attribute *attr,
			  const char *buf, size_t len,
			  ssize_t (*set)(struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set)(to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct adapter *adap = dev->priv; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, attr, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct adapter *adap = dev->priv;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct adapter *adap = dev->priv;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
			    char *buf, int sched)
{
	ssize_t len;
	unsigned int v, addr, bpt, cpt;
	struct adapter *adap = to_net_dev(d)->priv;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t len, int sched)
{
	char *endp;
	ssize_t ret;
	unsigned int val;
	struct adapter *adap = to_net_dev(d)->priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

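/*
 * Install a port's MAC address in the corresponding entry of the HW source
 * MAC table by sending a CPL_SMT_WRITE_REQ through the offload queue.
 */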
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

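/*
 * Write the port MTUs into the HW port MTU table, with port 1's MTU in the
 * upper 16 bits of the register.
 */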
static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

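/*
 * Send a management work request that configures one of the HW packet
 * schedulers: the queue it serves, its min/max parameters, and the port it
 * is bound to.
 */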
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	t3_mgmt_tx(adap, skb);
}

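/*
 * Bind each queue set to the TX channel of its port.
 */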
static void bind_qsets(struct adapter *adap)
{
	int i, j;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
					  -1, i);
	}
}

#define FW_FNAME "t3fw-%d.%d.bin"

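/*
 * Load a firmware image matching the driver's expected FW version from
 * userspace via request_firmware() and write it to the adapter.
 */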
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	return ret;
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err = 0;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL)
			err = upgrade_fw(adap);
		if (err)
			goto out;

		err = init_dummy_netdevs(adap);
		if (err)
			goto out;

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		if (request_msix_data_irqs(adap)) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	t3_sge_start(adap);
	t3_intr_enable(adap);

	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(adap);
	adap->flags |= QUEUES_BOUND;

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);

	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}

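/*
 * (Re)schedule the periodic adapter check task on the driver's private work
 * queue, using the link polling period if link polling is enabled, otherwise
 * the statistics update period.
 */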
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

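/*
 * Bring up the offload side of the adapter: enable offload mode, activate
 * the upper-layer offload support, program the MTU and source MAC tables,
 * and notify registered offload clients.  Undone by offload_close().
 */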
static int offload_open(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct t3cdev *tdev = T3CDEV(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err = 0;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		return err;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	/* Never mind if the next step fails */
	sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	int other_ports = adapter->open_device_map & PORT_MASK;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = netdev_priv(dev);

	t3_port_intr_disable(adapter, p->port_id);
	netif_stop_queue(dev);
	p->phy.ops->power_down(&p->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock(&adapter->work_lock);	/* sync with update task */
	clear_bit(p->port_id, &adapter->open_device_map);
	spin_unlock(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}

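/*
 * Map the accumulated MAC statistics onto the standard netdev statistics.
 */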
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = netdev_priv(dev);
	struct net_device_stats *ns = &p->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&p->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->priv;

	adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

};

static int get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	u32 fw_vers = 0;
	struct adapter *adapter = dev->priv;

	t3_get_fw_version(adapter, &fw_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers));
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

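/*
 * Sum up a per-queue SGE statistic across all the queue sets of a port.
 */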
static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = 0; i < p->nqsets; ++i)
		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
	return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->priv;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

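/*
 * ethtool port identification: blink the LED attached to GPIO0 for the
 * requested number of seconds (default 2).
 */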
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	int i;
	struct adapter *adapter = dev->priv;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

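/*
 * Translate a speed/duplex pair into the matching SUPPORTED_* capability
 * bit, or 0 if the combination is not supported.
 */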
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct adapter *adapter = dev->priv;
	const struct port_info *pi = netdev_priv(dev);
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	struct qset_params *q;
	struct adapter *adapter = dev->priv;
	const struct port_info *pi = netdev_priv(dev);

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = dev->priv;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = dev->priv;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_stats_count = get_stats_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.get_perm_addr = ethtool_op_get_perm_addr
};

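/*
 * Range-check an ioctl parameter; negative values mean "leave unchanged"
 * and are always accepted.
 */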
1578static int in_range(int val, int lo, int hi)
1579{
1580 return val < 0 || (val <= hi && val >= lo);
1581}
1582
1583static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1584{
1585 int ret;
1586 u32 cmd;
1587 struct adapter *adapter = dev->priv;
1588
1589 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1590 return -EFAULT;
1591
1592 switch (cmd) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001593 case CHELSIO_SET_QSET_PARAMS:{
1594 int i;
1595 struct qset_params *q;
1596 struct ch_qset_params t;
1597
1598 if (!capable(CAP_NET_ADMIN))
1599 return -EPERM;
1600 if (copy_from_user(&t, useraddr, sizeof(t)))
1601 return -EFAULT;
1602 if (t.qset_idx >= SGE_QSETS)
1603 return -EINVAL;
1604 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1605 !in_range(t.cong_thres, 0, 255) ||
1606 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1607 MAX_TXQ_ENTRIES) ||
1608 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1609 MAX_TXQ_ENTRIES) ||
1610 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1611 MAX_CTRL_TXQ_ENTRIES) ||
1612 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1613 MAX_RX_BUFFERS)
1614 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1615 MAX_RX_JUMBO_BUFFERS)
1616 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1617 MAX_RSPQ_ENTRIES))
1618 return -EINVAL;
1619 if ((adapter->flags & FULL_INIT_DONE) &&
1620 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1621 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1622 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1623 t.polling >= 0 || t.cong_thres >= 0))
1624 return -EBUSY;
1625
1626 q = &adapter->params.sge.qset[t.qset_idx];
1627
1628 if (t.rspq_size >= 0)
1629 q->rspq_size = t.rspq_size;
1630 if (t.fl_size[0] >= 0)
1631 q->fl_size = t.fl_size[0];
1632 if (t.fl_size[1] >= 0)
1633 q->jumbo_size = t.fl_size[1];
1634 if (t.txq_size[0] >= 0)
1635 q->txq_size[0] = t.txq_size[0];
1636 if (t.txq_size[1] >= 0)
1637 q->txq_size[1] = t.txq_size[1];
1638 if (t.txq_size[2] >= 0)
1639 q->txq_size[2] = t.txq_size[2];
1640 if (t.cong_thres >= 0)
1641 q->cong_thres = t.cong_thres;
1642 if (t.intr_lat >= 0) {
1643 struct sge_qset *qs =
1644 &adapter->sge.qs[t.qset_idx];
1645
1646 q->coalesce_usecs = t.intr_lat;
1647 t3_update_qset_coalesce(qs, q);
1648 }
1649 if (t.polling >= 0) {
1650 if (adapter->flags & USING_MSIX)
1651 q->polling = t.polling;
1652 else {
1653 /* No polling with INTx for T3A */
1654 if (adapter->params.rev == 0 &&
1655 !(adapter->flags & USING_MSI))
1656 t.polling = 0;
1657
1658 for (i = 0; i < SGE_QSETS; i++) {
1659 q = &adapter->params.sge.
1660 qset[i];
1661 q->polling = t.polling;
1662 }
1663 }
1664 }
1665 break;
1666 }
1667 case CHELSIO_GET_QSET_PARAMS:{
1668 struct qset_params *q;
1669 struct ch_qset_params t;
1670
1671 if (copy_from_user(&t, useraddr, sizeof(t)))
1672 return -EFAULT;
1673 if (t.qset_idx >= SGE_QSETS)
1674 return -EINVAL;
1675
1676 q = &adapter->params.sge.qset[t.qset_idx];
1677 t.rspq_size = q->rspq_size;
1678 t.txq_size[0] = q->txq_size[0];
1679 t.txq_size[1] = q->txq_size[1];
1680 t.txq_size[2] = q->txq_size[2];
1681 t.fl_size[0] = q->fl_size;
1682 t.fl_size[1] = q->jumbo_size;
1683 t.polling = q->polling;
1684 t.intr_lat = q->coalesce_usecs;
1685 t.cong_thres = q->cong_thres;
1686
1687 if (copy_to_user(useraddr, &t, sizeof(t)))
1688 return -EFAULT;
1689 break;
1690 }
1691 case CHELSIO_SET_QSET_NUM:{
1692 struct ch_reg edata;
1693 struct port_info *pi = netdev_priv(dev);
1694 unsigned int i, first_qset = 0, other_qsets = 0;
1695
1696 if (!capable(CAP_NET_ADMIN))
1697 return -EPERM;
1698 if (adapter->flags & FULL_INIT_DONE)
1699 return -EBUSY;
1700 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1701 return -EFAULT;
1702 if (edata.val < 1 ||
1703 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1704 return -EINVAL;
1705
1706 for_each_port(adapter, i)
1707 if (adapter->port[i] && adapter->port[i] != dev)
1708 other_qsets += adap2pinfo(adapter, i)->nqsets;
1709
1710 if (edata.val + other_qsets > SGE_QSETS)
1711 return -EINVAL;
1712
1713 pi->nqsets = edata.val;
1714
1715 for_each_port(adapter, i)
1716 if (adapter->port[i]) {
1717 pi = adap2pinfo(adapter, i);
1718 pi->first_qset = first_qset;
1719 first_qset += pi->nqsets;
1720 }
1721 break;
1722 }
1723 case CHELSIO_GET_QSET_NUM:{
1724 struct ch_reg edata;
1725 struct port_info *pi = netdev_priv(dev);
1726
1727 edata.cmd = CHELSIO_GET_QSET_NUM;
1728 edata.val = pi->nqsets;
1729 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1730 return -EFAULT;
1731 break;
1732 }
1733 case CHELSIO_LOAD_FW:{
1734 u8 *fw_data;
1735 struct ch_mem_range t;
1736
1737 if (!capable(CAP_NET_ADMIN))
1738 return -EPERM;
1739 if (copy_from_user(&t, useraddr, sizeof(t)))
1740 return -EFAULT;
1741
1742 fw_data = kmalloc(t.len, GFP_KERNEL);
1743 if (!fw_data)
1744 return -ENOMEM;
1745
1746 if (copy_from_user
1747 (fw_data, useraddr + sizeof(t), t.len)) {
1748 kfree(fw_data);
1749 return -EFAULT;
1750 }
1751
1752 ret = t3_load_fw(adapter, fw_data, t.len);
1753 kfree(fw_data);
1754 if (ret)
1755 return ret;
1756 break;
1757 }
1758 case CHELSIO_SETMTUTAB:{
1759 struct ch_mtus m;
1760 int i;
1761
1762 if (!is_offload(adapter))
1763 return -EOPNOTSUPP;
1764 if (!capable(CAP_NET_ADMIN))
1765 return -EPERM;
1766 if (offload_running(adapter))
1767 return -EBUSY;
1768 if (copy_from_user(&m, useraddr, sizeof(m)))
1769 return -EFAULT;
1770 if (m.nmtus != NMTUS)
1771 return -EINVAL;
1772 if (m.mtus[0] < 81) /* accommodate SACK */
1773 return -EINVAL;
1774
1775 /* MTUs must be in ascending order */
1776 for (i = 1; i < NMTUS; ++i)
1777 if (m.mtus[i] < m.mtus[i - 1])
1778 return -EINVAL;
1779
1780 memcpy(adapter->params.mtus, m.mtus,
1781 sizeof(adapter->params.mtus));
1782 break;
1783 }
1784 case CHELSIO_GET_PM:{
1785 struct tp_params *p = &adapter->params.tp;
1786 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1787
1788 if (!is_offload(adapter))
1789 return -EOPNOTSUPP;
1790 m.tx_pg_sz = p->tx_pg_size;
1791 m.tx_num_pg = p->tx_num_pgs;
1792 m.rx_pg_sz = p->rx_pg_size;
1793 m.rx_num_pg = p->rx_num_pgs;
1794 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1795 if (copy_to_user(useraddr, &m, sizeof(m)))
1796 return -EFAULT;
1797 break;
1798 }
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
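		/* x && !(x & (x - 1)) tests for a nonzero power of 2 */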
		if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
		    !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;	/* not a power of 4 in [16KB, 16MB] */
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			/* MC7 backdoor reads operate on 64-bit words, hence /8 */
			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

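		/*
		 * The trace parameters start at t.sip; the cast below assumes
		 * that struct ch_trace lays out its fields in the same order
		 * as struct trace_params.
		 */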
		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match, t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match, t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	int ret, mmd;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(req);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG:{
		u32 val;
		struct cphy *phy = &pi->phy;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
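		/*
		 * For 10G PHYs, bits 8+ of phy_id carry the clause-45 MMD
		 * (MDIO device) number while the PHY address proper is in
		 * the low 5 bits; 1G PHYs use clause 22 with 5-bit register
		 * numbers, hence the masking below.
		 */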
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
					     mmd, data->reg_num, &val);
		} else
			ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
					     0, data->reg_num & 0x1f, &val);
		if (!ret)
			data->val_out = val;
		break;
	}
	case SIOCSMIIREG:{
		struct cphy *phy = &pi->phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
					      mmd, data->reg_num,
					      data->val_in);
		} else
			ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
					      0, data->reg_num & 0x1f,
					      data->val_in);
		break;
	}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
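
/*
 * Illustrative sketch (not part of the driver): userspace reaches the
 * cxgb_extension_ioctl() commands above through SIOCCHIOCTL, passing a
 * command structure via ifr_data whose leading 'cmd' word selects the
 * operation.  Assuming the definitions from cxgb3_ioctl.h, querying the
 * number of queue sets of a hypothetical interface "eth0" through an
 * AF_INET socket fd might look like:
 *
 *	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&edata;
 *	if (ioctl(sock_fd, SIOCCHIOCTL, &ifr) == 0)
 *		printf("%u queue sets\n", edata.val);
 */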

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = 0; i < p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}

static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;

		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
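	/* make sure no in-flight Rx handler still sees the old vlan_grp */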
	t3_synchronize_rx(adapter, pi);
}

static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	/* nothing */
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct sge_qset *qs = dev2qset(dev);

	t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
						    adapter);
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}

static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
			t3_link_changed(adapter, i);
	}
}

static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	rtnl_lock();	/* synchronize with ifdown */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		/* rtnl_lock keeps the device from going down under us */
		status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)	/* the MAC was toggled */
			p->mac.stats.num_toggled++;
		else if (status == 2) {	/* the MAC needs a full reset */
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, 0, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}

static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/*
	 * Accumulate MAC stats if needed; linkpoll_period is in tenths of
	 * a second, hence the division by 10 below.
	 */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/* Schedule the next check update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock(&adapter->work_lock);
}

/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}

void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_intr_disable(adapter);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

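	/*
	 * pci_enable_msix() returns 0 on success and, when it cannot
	 * allocate all the requested vectors, a positive count of how
	 * many are available.
	 */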
	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}

static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->port_type->desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20);
	}
}

static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

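	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unavailable. */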
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(sizeof(struct port_info));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_MODULE_OWNER(netdev);
		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->rx_csum_offload = 1;
		pi->nqsets = 1;
		pi->first_qset = i;
		pi->activity = 0;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
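		/*
		 * netdev_priv() points at the per-port info allocated with
		 * the netdev above, while dev->priv is repointed at the
		 * adapter; both accessors are used throughout this driver.
		 */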
		netdev->priv = adapter;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_rx_register = vlan_rx_register;
		netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t3_eth_xmit;
		netdev->get_stats = cxgb_get_stats;
		netdev->set_multicast_list = cxgb_set_rxmode;
		netdev->do_ioctl = cxgb_ioctl;
		netdev->change_mtu = cxgb_change_mtu;
		netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = cxgb_netpoll;
#endif
		netdev->weight = 64;

		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter->port[0]);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready.  Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	/* failure to create the sysfs group is not fatal; err is ignored */
	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		int i;
		struct adapter *adapter = dev->priv;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
			if (adapter->dummy_netdev[i]) {
				free_netdev(adapter->dummy_netdev[i]);
				adapter->dummy_netdev[i] = NULL;
			}

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);