/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, ssid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 1, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1, 1),	/* T302E */
	CH_DEVICE(0x22, 1, 2),	/* T310E */
	CH_DEVICE(0x23, 1, 3),	/* T320X */
	CH_DEVICE(0x24, 1, 1),	/* T302X */
	CH_DEVICE(0x25, 1, 3),	/* T320E */
	CH_DEVICE(0x26, 1, 2),	/* T310X */
	CH_DEVICE(0x30, 1, 2),	/* T3B10 */
	CH_DEVICE(0x31, 1, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1, 1),	/* T3B02 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload by default.  To disable it, set ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to disable offload at init time");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 * link_report - show link status and link speed/duplex
 * @dev: the net device whose link settings are to be reported
 *
 * Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

/**
 * t3_os_link_changed - handle link status changes
 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
 * @link_stat: the new status of the link
 * @speed: the new speed setting
 * @duplex: the new duplex setting
 * @pause: the new flow-control setting
 *
 * This is the OS-dependent handler for link status changes.  The OS
 * neutral handler takes care of most of the processing for these events,
 * then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}

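/*
 * Propagate a net device's RX mode (promiscuity and multicast filters) to
 * its port's MAC.
 */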
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 * link_start - enable a port
 * @dev: the device to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

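/*
 * Release whichever of MSI-X or MSI the adapter is currently using and
 * clear the corresponding flag.
 */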
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s (queue %d)", d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

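/*
 * Request an MSI-X vector for each SGE queue set, unwinding any vectors
 * already acquired if a request fails.  Vector 0 is reserved for async
 * events and is requested separately in cxgb_up().
 */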
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS to distribute packets to multiple receive queues.  We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for two ports since the mapping
 * table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}

/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices.  We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues.  Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
	int i, j, dummy_idx = 0;
	struct net_device *nd;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		for (j = 0; j < pi->nqsets - 1; j++) {
			if (!adap->dummy_netdev[dummy_idx]) {
				nd = alloc_netdev(0, "", ether_setup);
				if (!nd)
					goto free_all;

				nd->priv = adap;
				nd->weight = 64;
				set_bit(__LINK_STATE_START, &nd->state);
				adap->dummy_netdev[dummy_idx] = nd;
			}
			strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
			dummy_idx++;
		}
	}
	return 0;

free_all:
	while (--dummy_idx >= 0) {
		free_netdev(adap->dummy_netdev[dummy_idx]);
		adap->dummy_netdev[dummy_idx] = NULL;
	}
	return -ENOMEM;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;
	struct net_device *dev;

	for_each_port(adap, i) {
		dev = adap->port[i];
		while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
			msleep(1);
	}

	for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
		dev = adap->dummy_netdev[i];
		if (dev)
			while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
				msleep(1);
	}
}

/**
 * setup_sge_qsets - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
				irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq,
				j == 0 ? dev :
				adap->dummy_netdev[dummy_dev_idx++]);
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

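/*
 * sysfs support: attr_show and attr_store bracket the per-attribute
 * format/set callbacks with the rtnl lock and, for stores, a CAP_NET_ADMIN
 * check and range validation of the new value.
 */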
static ssize_t attr_show(struct device *d, struct device_attribute *attr,
			 char *buf,
			 ssize_t (*format)(struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format)(to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d, struct device_attribute *attr,
			  const char *buf, size_t len,
			  ssize_t (*set)(struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set)(to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct adapter *adap = dev->priv; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, attr, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct adapter *adap = dev->priv;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct adapter *adap = dev->priv;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

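/*
 * tm_attr_show/tm_attr_store expose the rates of the eight HW Tx traffic
 * schedulers; a scheduler whose clocks-per-tick field is zero reads as
 * "disabled".
 */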
static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
			    char *buf, int sched)
{
	ssize_t len;
	unsigned int v, addr, bpt, cpt;
	struct adapter *adap = to_net_dev(d)->priv;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t len, int sched)
{
	char *endp;
	ssize_t ret;
	unsigned int val;
	struct adapter *adap = to_net_dev(d)->priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

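/*
 * Populate one entry of the HW source MAC table with the MAC address of
 * the given port, using a control message sent through the offload queue.
 */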
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

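/*
 * Program the ports' MTUs into the TP's per-port MTU table register.
 */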
static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

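/*
 * Send a firmware management work request that binds an SGE queue to a
 * packet scheduler and sets its min/max rate parameters.
 */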
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	t3_mgmt_tx(adap, skb);
}

static void bind_qsets(struct adapter *adap)
{
	int i, j;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
					  -1, i);
	}
}

#define FW_FNAME "t3fw-%d.%d.%d.bin"

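/*
 * Ask userspace for the firmware image matching the version this driver
 * expects and hand it to t3_load_fw().
 */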
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	return ret;
}

/**
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err = 0;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL)
			err = upgrade_fw(adap);
		if (err)
			goto out;

		err = init_dummy_netdevs(adap);
		if (err)
			goto out;

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		if (request_msix_data_irqs(adap)) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	t3_sge_start(adap);
	t3_intr_enable(adap);

	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(adap);
	adap->flags |= QUEUES_BOUND;

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);

	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}

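/*
 * Schedule the periodic adapter check task, using the link polling period
 * if one is configured and the stats update period otherwise.
 */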
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

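/*
 * Enable offload operation: bring the adapter up if no NIC port has done
 * so already, switch the HW to offload mode, initialize the MTU and source
 * MAC tables, and notify all registered offload clients.
 */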
static int offload_open(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct t3cdev *tdev = T3CDEV(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err = 0;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		return err;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	/* Never mind if the next step fails */
	sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

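/*
 * net_device open handler: bring the adapter up on first open, enable the
 * port and its interrupts, and kick off the periodic check task.
 */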
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	int other_ports = adapter->open_device_map & PORT_MASK;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = netdev_priv(dev);

	t3_port_intr_disable(adapter, p->port_id);
	netif_stop_queue(dev);
	p->phy.ops->power_down(&p->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock(&adapter->work_lock);	/* sync with update task */
	clear_bit(p->port_id, &adapter->open_device_map);
	spin_unlock(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}

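/*
 * Fill in a struct net_device_stats from the accumulated MAC statistics,
 * refreshing them under the stats lock first.
 */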
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = netdev_priv(dev);
	struct net_device_stats *ns = &p->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&p->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->priv;

	adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK ",
	"TxFramesOK ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames ",
	"TxUnderrun ",
	"TxExtUnderrun ",

	"TxFrames64 ",
	"TxFrames65To127 ",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",

	"RxOctetsOK ",
	"RxFramesOK ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames ",
	"RxFCSErrors ",
	"RxSymbolErrors ",
	"RxShortErrors ",
	"RxJabberErrors ",
	"RxLengthErrors ",
	"RxFIFOoverflow ",

	"RxFrames64 ",
	"RxFrames65To127 ",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",

	"PhyFIFOErrors ",
	"TSO ",
	"VLANextractions ",
	"VLANinsertions ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"RxDrops ",

	"CheckTXEnToggled ",
	"CheckResets ",

};

static int get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	u32 fw_vers = 0;
	struct adapter *adapter = dev->priv;

	t3_get_fw_version(adapter, &fw_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers));
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

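/*
 * Sum one per-queue-set SGE statistic over all queue sets of a port.
 */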
static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = 0; i < p->nqsets; ++i)
		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
	return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->priv;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

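/*
 * ethtool LED identify: blink the LED on GPIO0 every half second for the
 * requested number of seconds (default 2), then restore the output.
 */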
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	int i;
	struct adapter *adapter = dev->priv;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

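/*
 * Translate a speed/duplex pair into the corresponding ethtool
 * SUPPORTED_* capability bit.
 */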
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct adapter *adapter = dev->priv;
	const struct port_info *pi = netdev_priv(dev);
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	struct qset_params *q;
	struct adapter *adapter = dev->priv;
	const struct port_info *pi = netdev_priv(dev);

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = dev->priv;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = dev->priv;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_stats_count = get_stats_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.get_perm_addr = ethtool_op_get_perm_addr
};

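/*
 * Check that a value is within [lo, hi], treating negative values as
 * "leave unchanged".
 */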
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}

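/*
 * Handler for the driver-private CHELSIO_* extension commands; each case
 * validates its own arguments and capability requirements.
 */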
1591static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1592{
1593 int ret;
1594 u32 cmd;
1595 struct adapter *adapter = dev->priv;
1596
1597 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1598 return -EFAULT;
1599
1600 switch (cmd) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001601 case CHELSIO_SET_QSET_PARAMS:{
1602 int i;
1603 struct qset_params *q;
1604 struct ch_qset_params t;
1605
1606 if (!capable(CAP_NET_ADMIN))
1607 return -EPERM;
1608 if (copy_from_user(&t, useraddr, sizeof(t)))
1609 return -EFAULT;
1610 if (t.qset_idx >= SGE_QSETS)
1611 return -EINVAL;
1612 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1613 !in_range(t.cong_thres, 0, 255) ||
1614 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1615 MAX_TXQ_ENTRIES) ||
1616 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1617 MAX_TXQ_ENTRIES) ||
1618 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1619 MAX_CTRL_TXQ_ENTRIES) ||
1620 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1621 MAX_RX_BUFFERS)
1622 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1623 MAX_RX_JUMBO_BUFFERS)
1624 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1625 MAX_RSPQ_ENTRIES))
1626 return -EINVAL;
1627 if ((adapter->flags & FULL_INIT_DONE) &&
1628 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1629 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1630 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1631 t.polling >= 0 || t.cong_thres >= 0))
1632 return -EBUSY;
1633
1634 q = &adapter->params.sge.qset[t.qset_idx];
1635
1636 if (t.rspq_size >= 0)
1637 q->rspq_size = t.rspq_size;
1638 if (t.fl_size[0] >= 0)
1639 q->fl_size = t.fl_size[0];
1640 if (t.fl_size[1] >= 0)
1641 q->jumbo_size = t.fl_size[1];
1642 if (t.txq_size[0] >= 0)
1643 q->txq_size[0] = t.txq_size[0];
1644 if (t.txq_size[1] >= 0)
1645 q->txq_size[1] = t.txq_size[1];
1646 if (t.txq_size[2] >= 0)
1647 q->txq_size[2] = t.txq_size[2];
1648 if (t.cong_thres >= 0)
1649 q->cong_thres = t.cong_thres;
1650 if (t.intr_lat >= 0) {
1651 struct sge_qset *qs =
1652 &adapter->sge.qs[t.qset_idx];
1653
1654 q->coalesce_usecs = t.intr_lat;
1655 t3_update_qset_coalesce(qs, q);
1656 }
1657 if (t.polling >= 0) {
1658 if (adapter->flags & USING_MSIX)
1659 q->polling = t.polling;
1660 else {
1661 /* No polling with INTx for T3A */
1662 if (adapter->params.rev == 0 &&
1663 !(adapter->flags & USING_MSI))
1664 t.polling = 0;
1665
1666 for (i = 0; i < SGE_QSETS; i++) {
1667 q = &adapter->params.sge.
1668 qset[i];
1669 q->polling = t.polling;
1670 }
1671 }
1672 }
1673 break;
1674 }
1675 case CHELSIO_GET_QSET_PARAMS:{
1676 struct qset_params *q;
1677 struct ch_qset_params t;
1678
1679 if (copy_from_user(&t, useraddr, sizeof(t)))
1680 return -EFAULT;
1681 if (t.qset_idx >= SGE_QSETS)
1682 return -EINVAL;
1683
1684 q = &adapter->params.sge.qset[t.qset_idx];
1685 t.rspq_size = q->rspq_size;
1686 t.txq_size[0] = q->txq_size[0];
1687 t.txq_size[1] = q->txq_size[1];
1688 t.txq_size[2] = q->txq_size[2];
1689 t.fl_size[0] = q->fl_size;
1690 t.fl_size[1] = q->jumbo_size;
1691 t.polling = q->polling;
1692 t.intr_lat = q->coalesce_usecs;
1693 t.cong_thres = q->cong_thres;
1694
1695 if (copy_to_user(useraddr, &t, sizeof(t)))
1696 return -EFAULT;
1697 break;
1698 }
1699 case CHELSIO_SET_QSET_NUM:{
1700 struct ch_reg edata;
1701 struct port_info *pi = netdev_priv(dev);
1702 unsigned int i, first_qset = 0, other_qsets = 0;
1703
1704 if (!capable(CAP_NET_ADMIN))
1705 return -EPERM;
1706 if (adapter->flags & FULL_INIT_DONE)
1707 return -EBUSY;
1708 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1709 return -EFAULT;
1710 if (edata.val < 1 ||
1711 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1712 return -EINVAL;
1713
1714 for_each_port(adapter, i)
1715 if (adapter->port[i] && adapter->port[i] != dev)
1716 other_qsets += adap2pinfo(adapter, i)->nqsets;
1717
1718 if (edata.val + other_qsets > SGE_QSETS)
1719 return -EINVAL;
1720
1721 pi->nqsets = edata.val;
1722
1723 for_each_port(adapter, i)
1724 if (adapter->port[i]) {
1725 pi = adap2pinfo(adapter, i);
1726 pi->first_qset = first_qset;
1727 first_qset += pi->nqsets;
1728 }
1729 break;
1730 }
1731 case CHELSIO_GET_QSET_NUM:{
1732 struct ch_reg edata;
1733 struct port_info *pi = netdev_priv(dev);
1734
1735 edata.cmd = CHELSIO_GET_QSET_NUM;
1736 edata.val = pi->nqsets;
1737 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1738 return -EFAULT;
1739 break;
1740 }
1741 case CHELSIO_LOAD_FW:{
1742 u8 *fw_data;
1743 struct ch_mem_range t;
1744
1745 if (!capable(CAP_NET_ADMIN))
1746 return -EPERM;
1747 if (copy_from_user(&t, useraddr, sizeof(t)))
1748 return -EFAULT;
1749
1750 fw_data = kmalloc(t.len, GFP_KERNEL);
1751 if (!fw_data)
1752 return -ENOMEM;
1753
1754 if (copy_from_user
1755 (fw_data, useraddr + sizeof(t), t.len)) {
1756 kfree(fw_data);
1757 return -EFAULT;
1758 }
1759
1760 ret = t3_load_fw(adapter, fw_data, t.len);
1761 kfree(fw_data);
1762 if (ret)
1763 return ret;
1764 break;
1765 }
1766 case CHELSIO_SETMTUTAB:{
1767 struct ch_mtus m;
1768 int i;
1769
1770 if (!is_offload(adapter))
1771 return -EOPNOTSUPP;
1772 if (!capable(CAP_NET_ADMIN))
1773 return -EPERM;
1774 if (offload_running(adapter))
1775 return -EBUSY;
1776 if (copy_from_user(&m, useraddr, sizeof(m)))
1777 return -EFAULT;
1778 if (m.nmtus != NMTUS)
1779 return -EINVAL;
1780 if (m.mtus[0] < 81) /* accommodate SACK */
1781 return -EINVAL;
1782
1783 /* MTUs must be in ascending order */
1784 for (i = 1; i < NMTUS; ++i)
1785 if (m.mtus[i] < m.mtus[i - 1])
1786 return -EINVAL;
1787
1788 memcpy(adapter->params.mtus, m.mtus,
1789 sizeof(adapter->params.mtus));
1790 break;
1791 }
1792 case CHELSIO_GET_PM:{
1793 struct tp_params *p = &adapter->params.tp;
1794 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1795
1796 if (!is_offload(adapter))
1797 return -EOPNOTSUPP;
1798 m.tx_pg_sz = p->tx_pg_size;
1799 m.tx_num_pg = p->tx_num_pgs;
1800 m.rx_pg_sz = p->rx_pg_size;
1801 m.rx_num_pg = p->rx_num_pgs;
1802 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1803 if (copy_to_user(useraddr, &m, sizeof(m)))
1804 return -EFAULT;
1805 break;
1806 }
1807 case CHELSIO_SET_PM:{
1808 struct ch_pm m;
1809 struct tp_params *p = &adapter->params.tp;
1810
1811 if (!is_offload(adapter))
1812 return -EOPNOTSUPP;
1813 if (!capable(CAP_NET_ADMIN))
1814 return -EPERM;
1815 if (adapter->flags & FULL_INIT_DONE)
1816 return -EBUSY;
1817 if (copy_from_user(&m, useraddr, sizeof(m)))
1818 return -EFAULT;
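			/*
			 * Validate the requested PM page sizes: each must be
			 * a power of 2 drawn from the set its mask encodes.
			 * 0x14000 admits 16KB or 64KB Rx pages; 0x1554000
			 * admits Tx pages of 16KB, 64KB, 256KB, 1MB, 4MB or
			 * 16MB.
			 */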
1819 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1820 !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1821 return -EINVAL; /* not power of 2 */
1822 if (!(m.rx_pg_sz & 0x14000))
1823 return -EINVAL; /* not 16KB or 64KB */
1824 if (!(m.tx_pg_sz & 0x1554000))
1825 return -EINVAL;
1826 if (m.tx_num_pg == -1)
1827 m.tx_num_pg = p->tx_num_pgs;
1828 if (m.rx_num_pg == -1)
1829 m.rx_num_pg = p->rx_num_pgs;
1830 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1831 return -EINVAL;
1832 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1833 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1834 return -EINVAL;
1835 p->rx_pg_size = m.rx_pg_sz;
1836 p->tx_pg_size = m.tx_pg_sz;
1837 p->rx_num_pgs = m.rx_num_pg;
1838 p->tx_num_pgs = m.tx_num_pg;
1839 break;
1840 }
1841 case CHELSIO_GET_MEM:{
1842 struct ch_mem_range t;
1843 struct mc7 *mem;
1844 u64 buf[32];
1845
1846 if (!is_offload(adapter))
1847 return -EOPNOTSUPP;
1848 if (!(adapter->flags & FULL_INIT_DONE))
1849 return -EIO; /* need the memory controllers */
1850 if (copy_from_user(&t, useraddr, sizeof(t)))
1851 return -EFAULT;
1852 if ((t.addr & 7) || (t.len & 7))
1853 return -EINVAL;
1854 if (t.mem_id == MEM_CM)
1855 mem = &adapter->cm;
1856 else if (t.mem_id == MEM_PMRX)
1857 mem = &adapter->pmrx;
1858 else if (t.mem_id == MEM_PMTX)
1859 mem = &adapter->pmtx;
1860 else
1861 return -EINVAL;
1862
1863 /*
1864			 * Version scheme:
1865 * bits 0..9: chip version
1866 * bits 10..15: chip revision
1867 */
1868			t.version = 3 | (adapter->params.rev << 10);
1869 if (copy_to_user(useraddr, &t, sizeof(t)))
1870 return -EFAULT;
1871
1872 /*
1873 * Read 256 bytes at a time as len can be large and we don't
1874 * want to use huge intermediate buffers.
1875 */
1876 useraddr += sizeof(t); /* advance to start of buffer */
1877 while (t.len) {
1878 unsigned int chunk =
1879 min_t(unsigned int, t.len, sizeof(buf));
1880
1881 ret =
1882 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1883 buf);
1884 if (ret)
1885 return ret;
1886 if (copy_to_user(useraddr, buf, chunk))
1887 return -EFAULT;
1888 useraddr += chunk;
1889 t.addr += chunk;
1890 t.len -= chunk;
1891 }
1892 break;
1893 }
1894 case CHELSIO_SET_TRACE_FILTER:{
1895 struct ch_trace t;
1896 const struct trace_params *tp;
1897
1898 if (!capable(CAP_NET_ADMIN))
1899 return -EPERM;
1900 if (!offload_running(adapter))
1901 return -EAGAIN;
1902 if (copy_from_user(&t, useraddr, sizeof(t)))
1903 return -EFAULT;
1904
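			/*
			 * From the sip field onward, struct ch_trace is
			 * expected to mirror the layout of struct
			 * trace_params, so the copied-in block can be
			 * reinterpreted in place.
			 */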
1905 tp = (const struct trace_params *)&t.sip;
1906 if (t.config_tx)
1907 t3_config_trace_filter(adapter, tp, 0,
1908 t.invert_match,
1909 t.trace_tx);
1910 if (t.config_rx)
1911 t3_config_trace_filter(adapter, tp, 1,
1912 t.invert_match,
1913 t.trace_rx);
1914 break;
1915 }
1916	default:
1917 return -EOPNOTSUPP;
1918 }
1919 return 0;
1920}
1921
1922static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1923{
1924 int ret, mmd;
1925 struct adapter *adapter = dev->priv;
1926 struct port_info *pi = netdev_priv(dev);
1927 struct mii_ioctl_data *data = if_mii(req);
1928
1929 switch (cmd) {
1930 case SIOCGMIIPHY:
1931 data->phy_id = pi->phy.addr;
1932 /* FALLTHRU */
1933 case SIOCGMIIREG:{
1934 u32 val;
1935 struct cphy *phy = &pi->phy;
1936
1937 if (!phy->mdio_read)
1938 return -EOPNOTSUPP;
1939 if (is_10G(adapter)) {
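			/*
			 * Clause 45 access for 10G PHYs: the high byte of
			 * phy_id selects the MMD, with 0 defaulting to the
			 * PCS device.
			 */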
1940 mmd = data->phy_id >> 8;
1941 if (!mmd)
1942 mmd = MDIO_DEV_PCS;
1943 else if (mmd > MDIO_DEV_XGXS)
1944 return -EINVAL;
1945
1946 ret =
1947 phy->mdio_read(adapter, data->phy_id & 0x1f,
1948 mmd, data->reg_num, &val);
1949 } else
1950 ret =
1951 phy->mdio_read(adapter, data->phy_id & 0x1f,
1952 0, data->reg_num & 0x1f,
1953 &val);
1954 if (!ret)
1955 data->val_out = val;
1956 break;
1957 }
1958 case SIOCSMIIREG:{
1959 struct cphy *phy = &pi->phy;
1960
1961 if (!capable(CAP_NET_ADMIN))
1962 return -EPERM;
1963 if (!phy->mdio_write)
1964 return -EOPNOTSUPP;
1965 if (is_10G(adapter)) {
1966 mmd = data->phy_id >> 8;
1967 if (!mmd)
1968 mmd = MDIO_DEV_PCS;
1969 else if (mmd > MDIO_DEV_XGXS)
1970 return -EINVAL;
1971
1972 ret =
1973 phy->mdio_write(adapter,
1974 data->phy_id & 0x1f, mmd,
1975 data->reg_num,
1976 data->val_in);
1977 } else
1978 ret =
1979 phy->mdio_write(adapter,
1980 data->phy_id & 0x1f, 0,
1981 data->reg_num & 0x1f,
1982 data->val_in);
1983 break;
1984 }
1985 case SIOCCHIOCTL:
1986 return cxgb_extension_ioctl(dev, req->ifr_data);
1987 default:
1988 return -EOPNOTSUPP;
1989 }
1990 return ret;
1991}
1992
1993static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1994{
1995 int ret;
1996 struct adapter *adapter = dev->priv;
1997 struct port_info *pi = netdev_priv(dev);
1998
1999 if (new_mtu < 81) /* accommodate SACK */
2000 return -EINVAL;
2001 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2002 return ret;
2003 dev->mtu = new_mtu;
2004 init_port_mtus(adapter);
2005 if (adapter->params.rev == 0 && offload_running(adapter))
2006 t3_load_mtus(adapter, adapter->params.mtus,
2007 adapter->params.a_wnd, adapter->params.b_wnd,
2008 adapter->port[0]->mtu);
2009 return 0;
2010}
2011
2012static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2013{
2014 struct adapter *adapter = dev->priv;
2015 struct port_info *pi = netdev_priv(dev);
2016 struct sockaddr *addr = p;
2017
2018 if (!is_valid_ether_addr(addr->sa_data))
2019 return -EINVAL;
2020
2021 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2022 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2023 if (offload_running(adapter))
2024 write_smt_entry(adapter, pi->port_id);
2025 return 0;
2026}
2027
2028/**
2029 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2030 * @adap: the adapter
2031 * @p: the port
2032 *
2033 * Ensures that current Rx processing on any of the queues associated with
2034 * the given port completes before returning. We do this by acquiring and
2035 * releasing the locks of the response queues associated with the port.
2036 */
2037static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2038{
2039 int i;
2040
2041 for (i = 0; i < p->nqsets; i++) {
2042 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2043
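		/*
		 * Acquiring and releasing the lock waits out any Rx handler
		 * currently holding it.
		 */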
2044 spin_lock_irq(&q->lock);
2045 spin_unlock_irq(&q->lock);
2046 }
2047}
2048
2049static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2050{
2051 struct adapter *adapter = dev->priv;
2052 struct port_info *pi = netdev_priv(dev);
2053
2054 pi->vlan_grp = grp;
2055 if (adapter->params.rev > 0)
2056 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2057 else {
2058 /* single control for all ports */
2059 unsigned int i, have_vlans = 0;
2060 for_each_port(adapter, i)
2061 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2062
2063 t3_set_vlan_accel(adapter, 1, have_vlans);
2064 }
2065 t3_synchronize_rx(adapter, pi);
2066}
2067
2068static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2069{
2070	/*
	 * Nothing to do: VLAN acceleration is enabled per port and no
	 * per-VID state is kept, so there is nothing to revoke here.
	 */
2071}
2072
2073#ifdef CONFIG_NET_POLL_CONTROLLER
2074static void cxgb_netpoll(struct net_device *dev)
2075{
2076 struct adapter *adapter = dev->priv;
2077 struct sge_qset *qs = dev2qset(dev);
2078
2079 t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2080 adapter);
2081}
2082#endif
2083
2084/*
2085 * Periodic accumulation of MAC statistics.
2086 */
2087static void mac_stats_update(struct adapter *adapter)
2088{
2089 int i;
2090
2091 for_each_port(adapter, i) {
2092 struct net_device *dev = adapter->port[i];
2093 struct port_info *p = netdev_priv(dev);
2094
2095 if (netif_running(dev)) {
2096 spin_lock(&adapter->stats_lock);
2097 t3_mac_update_stats(&p->mac);
2098 spin_unlock(&adapter->stats_lock);
2099 }
2100 }
2101}
2102
2103static void check_link_status(struct adapter *adapter)
2104{
2105 int i;
2106
2107 for_each_port(adapter, i) {
2108 struct net_device *dev = adapter->port[i];
2109 struct port_info *p = netdev_priv(dev);
2110
2111 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2112 t3_link_changed(adapter, i);
2113 }
2114}
2115
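/*
 * MAC watchdog for T3B2 parts: a status of 1 from the watchdog task means
 * the MAC was toggled to recover; 2 means it was reset and must be
 * reprogrammed from scratch (MTU, address, Rx mode, link), as done below.
 */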
2116static void check_t3b2_mac(struct adapter *adapter)
2117{
2118 int i;
2119
2120	if (!rtnl_trylock())	/* synchronize with ifdown */
2121 return;
2122
2123	for_each_port(adapter, i) {
2124 struct net_device *dev = adapter->port[i];
2125 struct port_info *p = netdev_priv(dev);
2126 int status;
2127
2128 if (!netif_running(dev))
2129 continue;
2130
2131 status = 0;
2132		if (netif_running(dev) && netif_carrier_ok(dev))
2133			status = t3b2_mac_watchdog_task(&p->mac);
2134 if (status == 1)
2135 p->mac.stats.num_toggled++;
2136 else if (status == 2) {
2137 struct cmac *mac = &p->mac;
2138
2139 t3_mac_set_mtu(mac, dev->mtu);
2140 t3_mac_set_address(mac, 0, dev->dev_addr);
2141 cxgb_set_rxmode(dev);
2142 t3_link_start(&p->phy, mac, &p->link_config);
2143 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2144 t3_port_intr_enable(adapter, p->port_id);
2145 p->mac.stats.num_resets++;
2146 }
2147 }
2148 rtnl_unlock();
2149}
2150
2151
2152static void t3_adap_check_task(struct work_struct *work)
2153{
2154 struct adapter *adapter = container_of(work, struct adapter,
2155 adap_check_task.work);
2156 const struct adapter_params *p = &adapter->params;
2157
2158 adapter->check_task_cnt++;
2159
2160 /* Check link status for PHYs without interrupts */
2161 if (p->linkpoll_period)
2162 check_link_status(adapter);
2163
2164	/*
	 * Accumulate MAC stats if needed.  linkpoll_period is in tenths of a
	 * second, hence the divide by 10 when comparing elapsed poll time
	 * against stats_update_period (in seconds).
	 */
2165 if (!p->linkpoll_period ||
2166 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2167 p->stats_update_period) {
2168 mac_stats_update(adapter);
2169 adapter->check_task_cnt = 0;
2170 }
2171
2172	if (p->rev == T3_REV_B2)
2173 check_t3b2_mac(adapter);
2174
2175	/* Schedule the next check update if any port is active. */
2176 spin_lock(&adapter->work_lock);
2177 if (adapter->open_device_map & PORT_MASK)
2178 schedule_chk_task(adapter);
2179 spin_unlock(&adapter->work_lock);
2180}
2181
2182/*
2183 * Processes external (PHY) interrupts in process context.
2184 */
2185static void ext_intr_task(struct work_struct *work)
2186{
2187 struct adapter *adapter = container_of(work, struct adapter,
2188 ext_intr_handler_task);
2189
2190 t3_phy_intr_handler(adapter);
2191
2192 /* Now reenable external interrupts */
2193 spin_lock_irq(&adapter->work_lock);
2194 if (adapter->slow_intr_mask) {
2195 adapter->slow_intr_mask |= F_T3DBG;
2196 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2197 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2198 adapter->slow_intr_mask);
2199 }
2200 spin_unlock_irq(&adapter->work_lock);
2201}
2202
2203/*
2204 * Interrupt-context handler for external (PHY) interrupts.
2205 */
2206void t3_os_ext_intr_handler(struct adapter *adapter)
2207{
2208 /*
2209 * Schedule a task to handle external interrupts as they may be slow
2210 * and we use a mutex to protect MDIO registers. We disable PHY
2211 * interrupts in the meantime and let the task reenable them when
2212 * it's done.
2213 */
2214 spin_lock(&adapter->work_lock);
2215 if (adapter->slow_intr_mask) {
2216 adapter->slow_intr_mask &= ~F_T3DBG;
2217 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2218 adapter->slow_intr_mask);
2219 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2220 }
2221 spin_unlock(&adapter->work_lock);
2222}
2223
2224void t3_fatal_err(struct adapter *adapter)
2225{
2226 unsigned int fw_status[4];
2227
2228 if (adapter->flags & FULL_INIT_DONE) {
2229 t3_sge_stop(adapter);
2230 t3_intr_disable(adapter);
2231 }
2232 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
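	/*
	 * Dump what are presumed to be the firmware status words (the CIM
	 * control block at 0xa0) to aid post-mortem analysis.
	 */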
2233 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2234 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2235 fw_status[0], fw_status[1],
2236 fw_status[2], fw_status[3]);
2237
2238}
2239
2240static int __devinit cxgb_enable_msix(struct adapter *adap)
2241{
2242 struct msix_entry entries[SGE_QSETS + 1];
2243 int i, err;
2244
2245 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2246 entries[i].entry = i;
2247
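	/*
	 * pci_enable_msix() returns 0 on success, a negative errno on
	 * failure, or a positive count of the vectors actually available
	 * when the request cannot be met in full; only a full allocation
	 * is accepted here.
	 */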
2248 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2249 if (!err) {
2250 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2251 adap->msix_info[i].vec = entries[i].vector;
2252 } else if (err > 0)
2253 dev_info(&adap->pdev->dev,
2254 "only %d MSI-X vectors left, not using MSI-X\n", err);
2255 return err;
2256}
2257
2258static void __devinit print_port_info(struct adapter *adap,
2259 const struct adapter_info *ai)
2260{
2261 static const char *pci_variant[] = {
2262 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2263 };
2264
2265 int i;
2266 char buf[80];
2267
2268 if (is_pcie(adap))
2269 snprintf(buf, sizeof(buf), "%s x%d",
2270 pci_variant[adap->params.pci.variant],
2271 adap->params.pci.width);
2272 else
2273 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2274 pci_variant[adap->params.pci.variant],
2275 adap->params.pci.speed, adap->params.pci.width);
2276
2277 for_each_port(adap, i) {
2278 struct net_device *dev = adap->port[i];
2279 const struct port_info *pi = netdev_priv(dev);
2280
2281 if (!test_bit(i, &adap->registered_device_map))
2282 continue;
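		/* offload-capable adapters are reported as "RNIC" */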
2283		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2284		       dev->name, ai->desc, pi->port_type->desc,
2285		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
2286		       (adap->flags & USING_MSIX) ? " MSI-X" :
2287 (adap->flags & USING_MSI) ? " MSI" : "");
2288 if (adap->name == dev->name && adap->params.vpd.mclk)
2289 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2290 adap->name, t3_mc7_size(&adap->cm) >> 20,
2291 t3_mc7_size(&adap->pmtx) >> 20,
2292 t3_mc7_size(&adap->pmrx) >> 20);
2293 }
2294}
2295
2296static int __devinit init_one(struct pci_dev *pdev,
2297 const struct pci_device_id *ent)
2298{
2299 static int version_printed;
2300
2301 int i, err, pci_using_dac = 0;
2302 unsigned long mmio_start, mmio_len;
2303 const struct adapter_info *ai;
2304 struct adapter *adapter = NULL;
2305 struct port_info *pi;
2306
2307 if (!version_printed) {
2308 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2309 ++version_printed;
2310 }
2311
2312 if (!cxgb3_wq) {
2313 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2314 if (!cxgb3_wq) {
2315 printk(KERN_ERR DRV_NAME
2316 ": cannot initialize work queue\n");
2317 return -ENOMEM;
2318 }
2319 }
2320
2321 err = pci_request_regions(pdev, DRV_NAME);
2322 if (err) {
2323 /* Just info, some other driver may have claimed the device. */
2324 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2325 return err;
2326 }
2327
2328 err = pci_enable_device(pdev);
2329 if (err) {
2330 dev_err(&pdev->dev, "cannot enable PCI device\n");
2331 goto out_release_regions;
2332 }
2333
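	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unavailable. */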
2334 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2335 pci_using_dac = 1;
2336 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2337 if (err) {
2338 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2339 "coherent allocations\n");
2340 goto out_disable_device;
2341 }
2342 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2343 dev_err(&pdev->dev, "no usable DMA configuration\n");
2344 goto out_disable_device;
2345 }
2346
2347 pci_set_master(pdev);
2348
2349 mmio_start = pci_resource_start(pdev, 0);
2350 mmio_len = pci_resource_len(pdev, 0);
2351 ai = t3_get_adapter_info(ent->driver_data);
2352
2353 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2354 if (!adapter) {
2355 err = -ENOMEM;
2356 goto out_disable_device;
2357 }
2358
2359 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2360 if (!adapter->regs) {
2361 dev_err(&pdev->dev, "cannot map device registers\n");
2362 err = -ENOMEM;
2363 goto out_free_adapter;
2364 }
2365
2366 adapter->pdev = pdev;
2367 adapter->name = pci_name(pdev);
2368 adapter->msg_enable = dflt_msg_enable;
2369 adapter->mmio_len = mmio_len;
2370
2371 mutex_init(&adapter->mdio_lock);
2372 spin_lock_init(&adapter->work_lock);
2373 spin_lock_init(&adapter->stats_lock);
2374
2375 INIT_LIST_HEAD(&adapter->adapter_list);
2376 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2377 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2378
2379 for (i = 0; i < ai->nports; ++i) {
2380 struct net_device *netdev;
2381
2382 netdev = alloc_etherdev(sizeof(struct port_info));
2383 if (!netdev) {
2384 err = -ENOMEM;
2385 goto out_free_dev;
2386 }
2387
2388 SET_MODULE_OWNER(netdev);
2389 SET_NETDEV_DEV(netdev, &pdev->dev);
2390
2391 adapter->port[i] = netdev;
2392 pi = netdev_priv(netdev);
2393 pi->rx_csum_offload = 1;
2394 pi->nqsets = 1;
2395 pi->first_qset = i;
2396 pi->activity = 0;
2397 pi->port_id = i;
2398 netif_carrier_off(netdev);
2399 netdev->irq = pdev->irq;
2400 netdev->mem_start = mmio_start;
2401 netdev->mem_end = mmio_start + mmio_len - 1;
2402 netdev->priv = adapter;
2403 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2404 netdev->features |= NETIF_F_LLTX;
2405 if (pci_using_dac)
2406 netdev->features |= NETIF_F_HIGHDMA;
2407
2408 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2409 netdev->vlan_rx_register = vlan_rx_register;
2410 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2411
2412 netdev->open = cxgb_open;
2413 netdev->stop = cxgb_close;
2414 netdev->hard_start_xmit = t3_eth_xmit;
2415 netdev->get_stats = cxgb_get_stats;
2416 netdev->set_multicast_list = cxgb_set_rxmode;
2417 netdev->do_ioctl = cxgb_ioctl;
2418 netdev->change_mtu = cxgb_change_mtu;
2419 netdev->set_mac_address = cxgb_set_mac_addr;
2420#ifdef CONFIG_NET_POLL_CONTROLLER
2421 netdev->poll_controller = cxgb_netpoll;
2422#endif
2423 netdev->weight = 64;
2424
2425 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2426 }
2427
2428 pci_set_drvdata(pdev, adapter->port[0]);
2429 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2430 err = -ENODEV;
2431 goto out_free_dev;
2432 }
2433
2434 /*
2435 * The card is now ready to go. If any errors occur during device
2436 * registration we do not fail the whole card but rather proceed only
2437 * with the ports we manage to register successfully. However we must
2438 * register at least one net device.
2439 */
2440 for_each_port(adapter, i) {
2441 err = register_netdev(adapter->port[i]);
2442 if (err)
2443 dev_warn(&pdev->dev,
2444 "cannot register net device %s, skipping\n",
2445 adapter->port[i]->name);
2446 else {
2447 /*
2448 * Change the name we use for messages to the name of
2449 * the first successfully registered interface.
2450 */
2451 if (!adapter->registered_device_map)
2452 adapter->name = adapter->port[i]->name;
2453
2454 __set_bit(i, &adapter->registered_device_map);
2455 }
2456 }
2457 if (!adapter->registered_device_map) {
2458 dev_err(&pdev->dev, "could not register any net devices\n");
2459 goto out_free_dev;
2460 }
2461
2462 /* Driver's ready. Reflect it on LEDs */
2463 t3_led_ready(adapter);
2464
2465 if (is_offload(adapter)) {
2466 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2467 cxgb3_adapter_ofld(adapter);
2468 }
2469
2470	/* Pick the interrupt mode: msi > 1 tries MSI-X, msi > 0 MSI, else INTx */
2471 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2472 adapter->flags |= USING_MSIX;
2473 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2474 adapter->flags |= USING_MSI;
2475
2476	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2477				 &cxgb3_attr_group);
2478
2479 print_port_info(adapter, ai);
2480 return 0;
2481
2482out_free_dev:
2483 iounmap(adapter->regs);
2484 for (i = ai->nports - 1; i >= 0; --i)
2485 if (adapter->port[i])
2486 free_netdev(adapter->port[i]);
2487
2488out_free_adapter:
2489 kfree(adapter);
2490
2491out_disable_device:
2492 pci_disable_device(pdev);
2493out_release_regions:
2494 pci_release_regions(pdev);
2495 pci_set_drvdata(pdev, NULL);
2496 return err;
2497}
2498
2499static void __devexit remove_one(struct pci_dev *pdev)
2500{
2501 struct net_device *dev = pci_get_drvdata(pdev);
2502
2503 if (dev) {
2504 int i;
2505 struct adapter *adapter = dev->priv;
2506
2507 t3_sge_stop(adapter);
2508		sysfs_remove_group(&adapter->port[0]->dev.kobj,
2509				   &cxgb3_attr_group);
2510
2511 for_each_port(adapter, i)
2512 if (test_bit(i, &adapter->registered_device_map))
2513 unregister_netdev(adapter->port[i]);
2514
2515 if (is_offload(adapter)) {
2516 cxgb3_adapter_unofld(adapter);
2517 if (test_bit(OFFLOAD_DEVMAP_BIT,
2518 &adapter->open_device_map))
2519 offload_close(&adapter->tdev);
2520 }
2521
2522 t3_free_sge_resources(adapter);
2523 cxgb_disable_msi(adapter);
2524
2525 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2526 if (adapter->dummy_netdev[i]) {
2527 free_netdev(adapter->dummy_netdev[i]);
2528 adapter->dummy_netdev[i] = NULL;
2529 }
2530
2531 for_each_port(adapter, i)
2532 if (adapter->port[i])
2533 free_netdev(adapter->port[i]);
2534
2535 iounmap(adapter->regs);
2536 kfree(adapter);
2537 pci_release_regions(pdev);
2538 pci_disable_device(pdev);
2539 pci_set_drvdata(pdev, NULL);
2540 }
2541}
2542
2543static struct pci_driver driver = {
2544 .name = DRV_NAME,
2545 .id_table = cxgb3_pci_tbl,
2546 .probe = init_one,
2547 .remove = __devexit_p(remove_one),
2548};
2549
2550static int __init cxgb3_init_module(void)
2551{
2552 int ret;
2553
2554 cxgb3_offload_init();
2555
2556 ret = pci_register_driver(&driver);
2557 return ret;
2558}
2559
2560static void __exit cxgb3_cleanup_module(void)
2561{
2562 pci_unregister_driver(&driver);
2563 if (cxgb3_wq)
2564 destroy_workqueue(cxgb3_wq);
2565}
2566
2567module_init(cxgb3_init_module);
2568module_exit(cxgb3_cleanup_module);