blob: 3a31272167913dc4b7a621d7c07e2f08b70a3a6d [file] [log] [blame]
Divy Le Ray4d22de32007-01-18 22:04:14 -05001/*
Divy Le Ray1d68e932007-01-30 19:44:35 -08002 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
Divy Le Ray4d22de32007-01-18 22:04:14 -05003 *
Divy Le Ray1d68e932007-01-30 19:44:35 -08004 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
Divy Le Ray4d22de32007-01-18 22:04:14 -05009 *
Divy Le Ray1d68e932007-01-30 19:44:35 -080010 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
Divy Le Ray4d22de32007-01-18 22:04:14 -050031 */
Divy Le Ray4d22de32007-01-18 22:04:14 -050032#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/mii.h>
41#include <linux/sockios.h>
42#include <linux/workqueue.h>
43#include <linux/proc_fs.h>
44#include <linux/rtnetlink.h>
Divy Le Ray2e283962007-03-18 13:10:06 -070045#include <linux/firmware.h>
vignesh babud9da4662007-07-09 11:50:22 -070046#include <linux/log2.h>
Divy Le Ray4d22de32007-01-18 22:04:14 -050047#include <asm/uaccess.h>
48
49#include "common.h"
50#include "cxgb3_ioctl.h"
51#include "regs.h"
52#include "cxgb3_offload.h"
53#include "version.h"
54
55#include "cxgb3_ctl_defs.h"
56#include "t3_cpl.h"
57#include "firmware_exports.h"
58
59enum {
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
65 MIN_TXQ_ENTRIES = 4,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
68 MIN_FL_ENTRIES = 32
69};
70
71#define PORT_MASK ((1 << MAX_NPORTS) - 1)
72
73#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
76
77#define EEPROM_MAGIC 0x38E2F10C
78
Divy Le Ray678771d2007-11-16 14:26:44 -080079#define CH_DEVICE(devid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
Divy Le Ray4d22de32007-01-18 22:04:14 -050081
82static const struct pci_device_id cxgb3_pci_tbl[] = {
Divy Le Ray678771d2007-11-16 14:26:44 -080083 CH_DEVICE(0x20, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1), /* T302E */
85 CH_DEVICE(0x22, 2), /* T310E */
86 CH_DEVICE(0x23, 3), /* T320X */
87 CH_DEVICE(0x24, 1), /* T302X */
88 CH_DEVICE(0x25, 3), /* T320E */
89 CH_DEVICE(0x26, 2), /* T310X */
90 CH_DEVICE(0x30, 2), /* T3B10 */
91 CH_DEVICE(0x31, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1), /* T3B02 */
Divy Le Ray4d22de32007-01-18 22:04:14 -050093 {0,}
94};
95
96MODULE_DESCRIPTION(DRV_DESC);
97MODULE_AUTHOR("Chelsio Communications");
Divy Le Ray1d68e932007-01-30 19:44:35 -080098MODULE_LICENSE("Dual BSD/GPL");
Divy Le Ray4d22de32007-01-18 22:04:14 -050099MODULE_VERSION(DRV_VERSION);
100MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
101
102static int dflt_msg_enable = DFLT_MSG_ENABLE;
103
104module_param(dflt_msg_enable, int, 0644);
105MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
106
107/*
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
111 *
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
115 */
116static int msi = 2;
117
118module_param(msi, int, 0644);
119MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
120
121/*
122 * The driver enables offload as a default.
123 * To disable it, use ofld_disable = 1.
124 */
125
126static int ofld_disable = 0;
127
128module_param(ofld_disable, int, 0644);
129MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
130
131/*
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
138 */
139static struct workqueue_struct *cxgb3_wq;
140
141/**
142 * link_report - show link status and link speed/duplex
143 * @p: the port whose settings are to be reported
144 *
145 * Shows the link status, speed, and duplex of a port.
146 */
147static void link_report(struct net_device *dev)
148{
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
151 else {
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
154
155 switch (p->link_config.speed) {
156 case SPEED_10000:
157 s = "10Gbps";
158 break;
159 case SPEED_1000:
160 s = "1000Mbps";
161 break;
162 case SPEED_100:
163 s = "100Mbps";
164 break;
165 }
166
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
169 }
170}
171
172/**
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
175 * @port_id: the port index whose limk status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
180 *
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
184 */
185void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
187{
188 struct net_device *dev = adapter->port[port_id];
Divy Le Ray6d6daba2007-03-31 00:23:24 -0700189 struct port_info *pi = netdev_priv(dev);
190 struct cmac *mac = &pi->mac;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500191
192 /* Skip changes from disabled ports. */
193 if (!netif_running(dev))
194 return;
195
196 if (link_stat != netif_carrier_ok(dev)) {
Divy Le Ray6d6daba2007-03-31 00:23:24 -0700197 if (link_stat) {
Divy Le Ray59cf8102007-04-09 20:10:27 -0700198 t3_mac_enable(mac, MAC_DIRECTION_RX);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500199 netif_carrier_on(dev);
Divy Le Ray6d6daba2007-03-31 00:23:24 -0700200 } else {
Divy Le Ray4d22de32007-01-18 22:04:14 -0500201 netif_carrier_off(dev);
Divy Le Ray59cf8102007-04-09 20:10:27 -0700202 pi->phy.ops->power_down(&pi->phy, 1);
203 t3_mac_disable(mac, MAC_DIRECTION_RX);
204 t3_link_start(&pi->phy, mac, &pi->link_config);
Divy Le Ray6d6daba2007-03-31 00:23:24 -0700205 }
206
Divy Le Ray4d22de32007-01-18 22:04:14 -0500207 link_report(dev);
208 }
209}
210
211static void cxgb_set_rxmode(struct net_device *dev)
212{
213 struct t3_rx_mode rm;
214 struct port_info *pi = netdev_priv(dev);
215
216 init_rx_mode(&rm, dev, dev->mc_list);
217 t3_mac_set_rx_mode(&pi->mac, &rm);
218}
219
220/**
221 * link_start - enable a port
222 * @dev: the device to enable
223 *
224 * Performs the MAC and PHY actions needed to enable a port.
225 */
226static void link_start(struct net_device *dev)
227{
228 struct t3_rx_mode rm;
229 struct port_info *pi = netdev_priv(dev);
230 struct cmac *mac = &pi->mac;
231
232 init_rx_mode(&rm, dev, dev->mc_list);
233 t3_mac_reset(mac);
234 t3_mac_set_mtu(mac, dev->mtu);
235 t3_mac_set_address(mac, 0, dev->dev_addr);
236 t3_mac_set_rx_mode(mac, &rm);
237 t3_link_start(&pi->phy, mac, &pi->link_config);
238 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
239}
240
241static inline void cxgb_disable_msi(struct adapter *adapter)
242{
243 if (adapter->flags & USING_MSIX) {
244 pci_disable_msix(adapter->pdev);
245 adapter->flags &= ~USING_MSIX;
246 } else if (adapter->flags & USING_MSI) {
247 pci_disable_msi(adapter->pdev);
248 adapter->flags &= ~USING_MSI;
249 }
250}
251
252/*
253 * Interrupt handler for asynchronous events used with MSI-X.
254 */
255static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
256{
257 t3_slow_intr_handler(cookie);
258 return IRQ_HANDLED;
259}
260
261/*
262 * Name the MSI-X interrupts.
263 */
264static void name_msix_vecs(struct adapter *adap)
265{
266 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
267
268 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
269 adap->msix_info[0].desc[n] = 0;
270
271 for_each_port(adap, j) {
272 struct net_device *d = adap->port[j];
273 const struct port_info *pi = netdev_priv(d);
274
275 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
276 snprintf(adap->msix_info[msi_idx].desc, n,
277 "%s (queue %d)", d->name, i);
278 adap->msix_info[msi_idx].desc[n] = 0;
279 }
280 }
281}
282
283static int request_msix_data_irqs(struct adapter *adap)
284{
285 int i, j, err, qidx = 0;
286
287 for_each_port(adap, i) {
288 int nqsets = adap2pinfo(adap, i)->nqsets;
289
290 for (j = 0; j < nqsets; ++j) {
291 err = request_irq(adap->msix_info[qidx + 1].vec,
292 t3_intr_handler(adap,
293 adap->sge.qs[qidx].
294 rspq.polling), 0,
295 adap->msix_info[qidx + 1].desc,
296 &adap->sge.qs[qidx]);
297 if (err) {
298 while (--qidx >= 0)
299 free_irq(adap->msix_info[qidx + 1].vec,
300 &adap->sge.qs[qidx]);
301 return err;
302 }
303 qidx++;
304 }
305 }
306 return 0;
307}
308
Divy Le Rayb8819552007-12-17 18:47:31 -0800309static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
310 unsigned long n)
311{
312 int attempts = 5;
313
314 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
315 if (!--attempts)
316 return -ETIMEDOUT;
317 msleep(10);
318 }
319 return 0;
320}
321
322static int init_tp_parity(struct adapter *adap)
323{
324 int i;
325 struct sk_buff *skb;
326 struct cpl_set_tcb_field *greq;
327 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
328
329 t3_tp_set_offload_mode(adap, 1);
330
331 for (i = 0; i < 16; i++) {
332 struct cpl_smt_write_req *req;
333
334 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
335 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
336 memset(req, 0, sizeof(*req));
337 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
338 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
339 req->iff = i;
340 t3_mgmt_tx(adap, skb);
341 }
342
343 for (i = 0; i < 2048; i++) {
344 struct cpl_l2t_write_req *req;
345
346 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
347 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
348 memset(req, 0, sizeof(*req));
349 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
350 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
351 req->params = htonl(V_L2T_W_IDX(i));
352 t3_mgmt_tx(adap, skb);
353 }
354
355 for (i = 0; i < 2048; i++) {
356 struct cpl_rte_write_req *req;
357
358 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
359 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
360 memset(req, 0, sizeof(*req));
361 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
362 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
363 req->l2t_idx = htonl(V_L2T_W_IDX(i));
364 t3_mgmt_tx(adap, skb);
365 }
366
367 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
368 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
369 memset(greq, 0, sizeof(*greq));
370 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
371 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
372 greq->mask = cpu_to_be64(1);
373 t3_mgmt_tx(adap, skb);
374
375 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
376 t3_tp_set_offload_mode(adap, 0);
377 return i;
378}
379
Divy Le Ray4d22de32007-01-18 22:04:14 -0500380/**
381 * setup_rss - configure RSS
382 * @adap: the adapter
383 *
384 * Sets up RSS to distribute packets to multiple receive queues. We
385 * configure the RSS CPU lookup table to distribute to the number of HW
386 * receive queues, and the response queue lookup table to narrow that
387 * down to the response queues actually configured for each port.
388 * We always configure the RSS mapping for two ports since the mapping
389 * table has plenty of entries.
390 */
391static void setup_rss(struct adapter *adap)
392{
393 int i;
394 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
395 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
396 u8 cpus[SGE_QSETS + 1];
397 u16 rspq_map[RSS_TABLE_SIZE];
398
399 for (i = 0; i < SGE_QSETS; ++i)
400 cpus[i] = i;
401 cpus[SGE_QSETS] = 0xff; /* terminator */
402
403 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
404 rspq_map[i] = i % nq0;
405 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
406 }
407
408 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
409 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
Divy Le Raya2604be2007-11-16 11:22:16 -0800410 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500411}
412
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700413static void init_napi(struct adapter *adap)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500414{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700415 int i;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500416
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700417 for (i = 0; i < SGE_QSETS; i++) {
418 struct sge_qset *qs = &adap->sge.qs[i];
Divy Le Ray4d22de32007-01-18 22:04:14 -0500419
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700420 if (qs->adap)
421 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
422 64);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500423 }
Divy Le Ray48c4b6d2008-05-06 19:25:56 -0700424
425 /*
426 * netif_napi_add() can be called only once per napi_struct because it
427 * adds each new napi_struct to a list. Be careful not to call it a
428 * second time, e.g., during EEH recovery, by making a note of it.
429 */
430 adap->flags |= NAPI_INIT;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500431}
432
433/*
434 * Wait until all NAPI handlers are descheduled. This includes the handlers of
435 * both netdevices representing interfaces and the dummy ones for the extra
436 * queues.
437 */
438static void quiesce_rx(struct adapter *adap)
439{
440 int i;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500441
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700442 for (i = 0; i < SGE_QSETS; i++)
443 if (adap->sge.qs[i].adap)
444 napi_disable(&adap->sge.qs[i].napi);
445}
Divy Le Ray4d22de32007-01-18 22:04:14 -0500446
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700447static void enable_all_napi(struct adapter *adap)
448{
449 int i;
450 for (i = 0; i < SGE_QSETS; i++)
451 if (adap->sge.qs[i].adap)
452 napi_enable(&adap->sge.qs[i].napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500453}
454
455/**
456 * setup_sge_qsets - configure SGE Tx/Rx/response queues
457 * @adap: the adapter
458 *
459 * Determines how many sets of SGE queues to use and initializes them.
460 * We support multiple queue sets per port if we have MSI-X, otherwise
461 * just one queue set per port.
462 */
463static int setup_sge_qsets(struct adapter *adap)
464{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700465 int i, j, err, irq_idx = 0, qset_idx = 0;
Divy Le Ray8ac3ba62007-03-31 00:23:19 -0700466 unsigned int ntxq = SGE_TXQ_PER_SET;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500467
468 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
469 irq_idx = -1;
470
471 for_each_port(adap, i) {
472 struct net_device *dev = adap->port[i];
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700473 struct port_info *pi = netdev_priv(dev);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500474
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700475 pi->qs = &adap->sge.qs[pi->first_qset];
Divy Le Ray4d22de32007-01-18 22:04:14 -0500476 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
477 err = t3_sge_alloc_qset(adap, qset_idx, 1,
478 (adap->flags & USING_MSIX) ? qset_idx + 1 :
479 irq_idx,
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700480 &adap->params.sge.qset[qset_idx], ntxq, dev);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500481 if (err) {
482 t3_free_sge_resources(adap);
483 return err;
484 }
485 }
486 }
487
488 return 0;
489}
490
Divy Le Ray3e5192e2007-11-16 11:22:10 -0800491static ssize_t attr_show(struct device *d, char *buf,
Divy Le Ray896392e2007-02-24 16:43:50 -0800492 ssize_t(*format) (struct net_device *, char *))
Divy Le Ray4d22de32007-01-18 22:04:14 -0500493{
494 ssize_t len;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500495
496 /* Synchronize with ioctls that may shut down the device */
497 rtnl_lock();
Divy Le Ray896392e2007-02-24 16:43:50 -0800498 len = (*format) (to_net_dev(d), buf);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500499 rtnl_unlock();
500 return len;
501}
502
Divy Le Ray3e5192e2007-11-16 11:22:10 -0800503static ssize_t attr_store(struct device *d,
Divy Le Ray0ee8d332007-02-08 16:55:59 -0800504 const char *buf, size_t len,
Divy Le Ray896392e2007-02-24 16:43:50 -0800505 ssize_t(*set) (struct net_device *, unsigned int),
Divy Le Ray4d22de32007-01-18 22:04:14 -0500506 unsigned int min_val, unsigned int max_val)
507{
508 char *endp;
509 ssize_t ret;
510 unsigned int val;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500511
512 if (!capable(CAP_NET_ADMIN))
513 return -EPERM;
514
515 val = simple_strtoul(buf, &endp, 0);
516 if (endp == buf || val < min_val || val > max_val)
517 return -EINVAL;
518
519 rtnl_lock();
Divy Le Ray896392e2007-02-24 16:43:50 -0800520 ret = (*set) (to_net_dev(d), val);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500521 if (!ret)
522 ret = len;
523 rtnl_unlock();
524 return ret;
525}
526
527#define CXGB3_SHOW(name, val_expr) \
Divy Le Ray896392e2007-02-24 16:43:50 -0800528static ssize_t format_##name(struct net_device *dev, char *buf) \
Divy Le Ray4d22de32007-01-18 22:04:14 -0500529{ \
Divy Le Ray5fbf8162007-08-29 19:15:47 -0700530 struct port_info *pi = netdev_priv(dev); \
531 struct adapter *adap = pi->adapter; \
Divy Le Ray4d22de32007-01-18 22:04:14 -0500532 return sprintf(buf, "%u\n", val_expr); \
533} \
Divy Le Ray0ee8d332007-02-08 16:55:59 -0800534static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
535 char *buf) \
Divy Le Ray4d22de32007-01-18 22:04:14 -0500536{ \
Divy Le Ray3e5192e2007-11-16 11:22:10 -0800537 return attr_show(d, buf, format_##name); \
Divy Le Ray4d22de32007-01-18 22:04:14 -0500538}
539
Divy Le Ray896392e2007-02-24 16:43:50 -0800540static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500541{
Divy Le Ray5fbf8162007-08-29 19:15:47 -0700542 struct port_info *pi = netdev_priv(dev);
543 struct adapter *adap = pi->adapter;
Divy Le Ray9f238482007-03-31 00:23:13 -0700544 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
Divy Le Ray896392e2007-02-24 16:43:50 -0800545
Divy Le Ray4d22de32007-01-18 22:04:14 -0500546 if (adap->flags & FULL_INIT_DONE)
547 return -EBUSY;
548 if (val && adap->params.rev == 0)
549 return -EINVAL;
Divy Le Ray9f238482007-03-31 00:23:13 -0700550 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
551 min_tids)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500552 return -EINVAL;
553 adap->params.mc5.nfilters = val;
554 return 0;
555}
556
Divy Le Ray0ee8d332007-02-08 16:55:59 -0800557static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
558 const char *buf, size_t len)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500559{
Divy Le Ray3e5192e2007-11-16 11:22:10 -0800560 return attr_store(d, buf, len, set_nfilters, 0, ~0);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500561}
562
Divy Le Ray896392e2007-02-24 16:43:50 -0800563static ssize_t set_nservers(struct net_device *dev, unsigned int val)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500564{
Divy Le Ray5fbf8162007-08-29 19:15:47 -0700565 struct port_info *pi = netdev_priv(dev);
566 struct adapter *adap = pi->adapter;
Divy Le Ray896392e2007-02-24 16:43:50 -0800567
Divy Le Ray4d22de32007-01-18 22:04:14 -0500568 if (adap->flags & FULL_INIT_DONE)
569 return -EBUSY;
Divy Le Ray9f238482007-03-31 00:23:13 -0700570 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
571 MC5_MIN_TIDS)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500572 return -EINVAL;
573 adap->params.mc5.nservers = val;
574 return 0;
575}
576
Divy Le Ray0ee8d332007-02-08 16:55:59 -0800577static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
578 const char *buf, size_t len)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500579{
Divy Le Ray3e5192e2007-11-16 11:22:10 -0800580 return attr_store(d, buf, len, set_nservers, 0, ~0);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500581}
582
583#define CXGB3_ATTR_R(name, val_expr) \
584CXGB3_SHOW(name, val_expr) \
Divy Le Ray0ee8d332007-02-08 16:55:59 -0800585static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500586
587#define CXGB3_ATTR_RW(name, val_expr, store_method) \
588CXGB3_SHOW(name, val_expr) \
Divy Le Ray0ee8d332007-02-08 16:55:59 -0800589static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500590
591CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
592CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
593CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
594
595static struct attribute *cxgb3_attrs[] = {
Divy Le Ray0ee8d332007-02-08 16:55:59 -0800596 &dev_attr_cam_size.attr,
597 &dev_attr_nfilters.attr,
598 &dev_attr_nservers.attr,
Divy Le Ray4d22de32007-01-18 22:04:14 -0500599 NULL
600};
601
602static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
603
Divy Le Ray3e5192e2007-11-16 11:22:10 -0800604static ssize_t tm_attr_show(struct device *d,
Divy Le Ray0ee8d332007-02-08 16:55:59 -0800605 char *buf, int sched)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500606{
Divy Le Ray5fbf8162007-08-29 19:15:47 -0700607 struct port_info *pi = netdev_priv(to_net_dev(d));
608 struct adapter *adap = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500609 unsigned int v, addr, bpt, cpt;
Divy Le Ray5fbf8162007-08-29 19:15:47 -0700610 ssize_t len;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500611
612 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
613 rtnl_lock();
614 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
615 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
616 if (sched & 1)
617 v >>= 16;
618 bpt = (v >> 8) & 0xff;
619 cpt = v & 0xff;
620 if (!cpt)
621 len = sprintf(buf, "disabled\n");
622 else {
623 v = (adap->params.vpd.cclk * 1000) / cpt;
624 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
625 }
626 rtnl_unlock();
627 return len;
628}
629
Divy Le Ray3e5192e2007-11-16 11:22:10 -0800630static ssize_t tm_attr_store(struct device *d,
Divy Le Ray0ee8d332007-02-08 16:55:59 -0800631 const char *buf, size_t len, int sched)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500632{
Divy Le Ray5fbf8162007-08-29 19:15:47 -0700633 struct port_info *pi = netdev_priv(to_net_dev(d));
634 struct adapter *adap = pi->adapter;
635 unsigned int val;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500636 char *endp;
637 ssize_t ret;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500638
639 if (!capable(CAP_NET_ADMIN))
640 return -EPERM;
641
642 val = simple_strtoul(buf, &endp, 0);
643 if (endp == buf || val > 10000000)
644 return -EINVAL;
645
646 rtnl_lock();
647 ret = t3_config_sched(adap, val, sched);
648 if (!ret)
649 ret = len;
650 rtnl_unlock();
651 return ret;
652}
653
654#define TM_ATTR(name, sched) \
Divy Le Ray0ee8d332007-02-08 16:55:59 -0800655static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
656 char *buf) \
Divy Le Ray4d22de32007-01-18 22:04:14 -0500657{ \
Divy Le Ray3e5192e2007-11-16 11:22:10 -0800658 return tm_attr_show(d, buf, sched); \
Divy Le Ray4d22de32007-01-18 22:04:14 -0500659} \
Divy Le Ray0ee8d332007-02-08 16:55:59 -0800660static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
661 const char *buf, size_t len) \
Divy Le Ray4d22de32007-01-18 22:04:14 -0500662{ \
Divy Le Ray3e5192e2007-11-16 11:22:10 -0800663 return tm_attr_store(d, buf, len, sched); \
Divy Le Ray4d22de32007-01-18 22:04:14 -0500664} \
Divy Le Ray0ee8d332007-02-08 16:55:59 -0800665static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500666
667TM_ATTR(sched0, 0);
668TM_ATTR(sched1, 1);
669TM_ATTR(sched2, 2);
670TM_ATTR(sched3, 3);
671TM_ATTR(sched4, 4);
672TM_ATTR(sched5, 5);
673TM_ATTR(sched6, 6);
674TM_ATTR(sched7, 7);
675
676static struct attribute *offload_attrs[] = {
Divy Le Ray0ee8d332007-02-08 16:55:59 -0800677 &dev_attr_sched0.attr,
678 &dev_attr_sched1.attr,
679 &dev_attr_sched2.attr,
680 &dev_attr_sched3.attr,
681 &dev_attr_sched4.attr,
682 &dev_attr_sched5.attr,
683 &dev_attr_sched6.attr,
684 &dev_attr_sched7.attr,
Divy Le Ray4d22de32007-01-18 22:04:14 -0500685 NULL
686};
687
688static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
689
690/*
691 * Sends an sk_buff to an offload queue driver
692 * after dealing with any active network taps.
693 */
694static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
695{
696 int ret;
697
698 local_bh_disable();
699 ret = t3_offload_tx(tdev, skb);
700 local_bh_enable();
701 return ret;
702}
703
704static int write_smt_entry(struct adapter *adapter, int idx)
705{
706 struct cpl_smt_write_req *req;
707 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
708
709 if (!skb)
710 return -ENOMEM;
711
712 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
713 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
714 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
715 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
716 req->iff = idx;
717 memset(req->src_mac1, 0, sizeof(req->src_mac1));
718 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
719 skb->priority = 1;
720 offload_tx(&adapter->tdev, skb);
721 return 0;
722}
723
724static int init_smt(struct adapter *adapter)
725{
726 int i;
727
728 for_each_port(adapter, i)
729 write_smt_entry(adapter, i);
730 return 0;
731}
732
733static void init_port_mtus(struct adapter *adapter)
734{
735 unsigned int mtus = adapter->port[0]->mtu;
736
737 if (adapter->port[1])
738 mtus |= adapter->port[1]->mtu << 16;
739 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
740}
741
Divy Le Ray14ab9892007-01-30 19:43:50 -0800742static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
743 int hi, int port)
744{
745 struct sk_buff *skb;
746 struct mngt_pktsched_wr *req;
747
748 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
749 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
750 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
751 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
752 req->sched = sched;
753 req->idx = qidx;
754 req->min = lo;
755 req->max = hi;
756 req->binding = port;
757 t3_mgmt_tx(adap, skb);
758}
759
760static void bind_qsets(struct adapter *adap)
761{
762 int i, j;
763
764 for_each_port(adap, i) {
765 const struct port_info *pi = adap2pinfo(adap, i);
766
767 for (j = 0; j < pi->nqsets; ++j)
768 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
769 -1, i);
770 }
771}
772
Divy Le Ray7f672cf2007-03-31 00:23:30 -0700773#define FW_FNAME "t3fw-%d.%d.%d.bin"
Divy Le Ray47330072007-08-29 19:15:52 -0700774#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
Divy Le Ray2e283962007-03-18 13:10:06 -0700775
776static int upgrade_fw(struct adapter *adap)
777{
778 int ret;
779 char buf[64];
780 const struct firmware *fw;
781 struct device *dev = &adap->pdev->dev;
782
783 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
Divy Le Ray7f672cf2007-03-31 00:23:30 -0700784 FW_VERSION_MINOR, FW_VERSION_MICRO);
Divy Le Ray2e283962007-03-18 13:10:06 -0700785 ret = request_firmware(&fw, buf, dev);
786 if (ret < 0) {
787 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
788 buf);
789 return ret;
790 }
791 ret = t3_load_fw(adap, fw->data, fw->size);
792 release_firmware(fw);
Divy Le Ray47330072007-08-29 19:15:52 -0700793
794 if (ret == 0)
795 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
796 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
797 else
798 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
799 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
Jeff Garzik2eab17a2007-11-23 21:59:45 -0500800
Divy Le Ray47330072007-08-29 19:15:52 -0700801 return ret;
802}
803
804static inline char t3rev2char(struct adapter *adapter)
805{
806 char rev = 0;
807
808 switch(adapter->params.rev) {
809 case T3_REV_B:
810 case T3_REV_B2:
811 rev = 'b';
812 break;
Divy Le Ray1aafee22007-09-05 15:58:36 -0700813 case T3_REV_C:
814 rev = 'c';
815 break;
Divy Le Ray47330072007-08-29 19:15:52 -0700816 }
817 return rev;
818}
819
Stephen Hemminger9265fab2007-10-08 16:22:29 -0700820static int update_tpsram(struct adapter *adap)
Divy Le Ray47330072007-08-29 19:15:52 -0700821{
822 const struct firmware *tpsram;
823 char buf[64];
824 struct device *dev = &adap->pdev->dev;
825 int ret;
826 char rev;
Jeff Garzik2eab17a2007-11-23 21:59:45 -0500827
Divy Le Ray47330072007-08-29 19:15:52 -0700828 rev = t3rev2char(adap);
829 if (!rev)
830 return 0;
831
832 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
833 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
834
835 ret = request_firmware(&tpsram, buf, dev);
836 if (ret < 0) {
837 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
838 buf);
839 return ret;
840 }
Jeff Garzik2eab17a2007-11-23 21:59:45 -0500841
Divy Le Ray47330072007-08-29 19:15:52 -0700842 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
843 if (ret)
Jeff Garzik2eab17a2007-11-23 21:59:45 -0500844 goto release_tpsram;
Divy Le Ray47330072007-08-29 19:15:52 -0700845
846 ret = t3_set_proto_sram(adap, tpsram->data);
847 if (ret == 0)
848 dev_info(dev,
849 "successful update of protocol engine "
850 "to %d.%d.%d\n",
851 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
852 else
853 dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
854 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
855 if (ret)
856 dev_err(dev, "loading protocol SRAM failed\n");
857
858release_tpsram:
859 release_firmware(tpsram);
Jeff Garzik2eab17a2007-11-23 21:59:45 -0500860
Divy Le Ray2e283962007-03-18 13:10:06 -0700861 return ret;
862}
863
Divy Le Ray4d22de32007-01-18 22:04:14 -0500864/**
865 * cxgb_up - enable the adapter
866 * @adapter: adapter being enabled
867 *
868 * Called when the first port is enabled, this function performs the
869 * actions necessary to make an adapter operational, such as completing
870 * the initialization of HW modules, and enabling interrupts.
871 *
872 * Must be called with the rtnl lock held.
873 */
874static int cxgb_up(struct adapter *adap)
875{
Denis Chengc54f5c22007-07-18 15:24:49 +0800876 int err;
Divy Le Ray47330072007-08-29 19:15:52 -0700877 int must_load;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500878
879 if (!(adap->flags & FULL_INIT_DONE)) {
Divy Le Raya5a3b462007-09-05 15:58:09 -0700880 err = t3_check_fw_version(adap, &must_load);
881 if (err == -EINVAL) {
Divy Le Ray2e283962007-03-18 13:10:06 -0700882 err = upgrade_fw(adap);
Divy Le Raya5a3b462007-09-05 15:58:09 -0700883 if (err && must_load)
884 goto out;
885 }
Divy Le Ray4d22de32007-01-18 22:04:14 -0500886
Divy Le Ray47330072007-08-29 19:15:52 -0700887 err = t3_check_tpsram_version(adap, &must_load);
888 if (err == -EINVAL) {
889 err = update_tpsram(adap);
890 if (err && must_load)
891 goto out;
892 }
893
Divy Le Ray4d22de32007-01-18 22:04:14 -0500894 err = t3_init_hw(adap, 0);
895 if (err)
896 goto out;
897
Divy Le Rayb8819552007-12-17 18:47:31 -0800898 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
Divy Le Ray6cdbd772007-04-09 20:10:33 -0700899 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700900
Divy Le Ray4d22de32007-01-18 22:04:14 -0500901 err = setup_sge_qsets(adap);
902 if (err)
903 goto out;
904
905 setup_rss(adap);
Divy Le Ray48c4b6d2008-05-06 19:25:56 -0700906 if (!(adap->flags & NAPI_INIT))
907 init_napi(adap);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500908 adap->flags |= FULL_INIT_DONE;
909 }
910
911 t3_intr_clear(adap);
912
913 if (adap->flags & USING_MSIX) {
914 name_msix_vecs(adap);
915 err = request_irq(adap->msix_info[0].vec,
916 t3_async_intr_handler, 0,
917 adap->msix_info[0].desc, adap);
918 if (err)
919 goto irq_err;
920
Divy Le Ray42256f52007-11-16 11:21:39 -0800921 err = request_msix_data_irqs(adap);
922 if (err) {
Divy Le Ray4d22de32007-01-18 22:04:14 -0500923 free_irq(adap->msix_info[0].vec, adap);
924 goto irq_err;
925 }
926 } else if ((err = request_irq(adap->pdev->irq,
927 t3_intr_handler(adap,
928 adap->sge.qs[0].rspq.
929 polling),
Thomas Gleixner2db63462007-02-14 00:33:20 -0800930 (adap->flags & USING_MSI) ?
931 0 : IRQF_SHARED,
Divy Le Ray4d22de32007-01-18 22:04:14 -0500932 adap->name, adap)))
933 goto irq_err;
934
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700935 enable_all_napi(adap);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500936 t3_sge_start(adap);
937 t3_intr_enable(adap);
Divy Le Ray14ab9892007-01-30 19:43:50 -0800938
Divy Le Rayb8819552007-12-17 18:47:31 -0800939 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
940 is_offload(adap) && init_tp_parity(adap) == 0)
941 adap->flags |= TP_PARITY_INIT;
942
943 if (adap->flags & TP_PARITY_INIT) {
944 t3_write_reg(adap, A_TP_INT_CAUSE,
945 F_CMCACHEPERR | F_ARPLUTPERR);
946 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
947 }
948
Divy Le Ray14ab9892007-01-30 19:43:50 -0800949 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
950 bind_qsets(adap);
951 adap->flags |= QUEUES_BOUND;
952
Divy Le Ray4d22de32007-01-18 22:04:14 -0500953out:
954 return err;
955irq_err:
956 CH_ERR(adap, "request_irq failed, err %d\n", err);
957 goto out;
958}
959
960/*
961 * Release resources when all the ports and offloading have been stopped.
962 */
963static void cxgb_down(struct adapter *adapter)
964{
965 t3_sge_stop(adapter);
966 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
967 t3_intr_disable(adapter);
968 spin_unlock_irq(&adapter->work_lock);
969
970 if (adapter->flags & USING_MSIX) {
971 int i, n = 0;
972
973 free_irq(adapter->msix_info[0].vec, adapter);
974 for_each_port(adapter, i)
975 n += adap2pinfo(adapter, i)->nqsets;
976
977 for (i = 0; i < n; ++i)
978 free_irq(adapter->msix_info[i + 1].vec,
979 &adapter->sge.qs[i]);
980 } else
981 free_irq(adapter->pdev->irq, adapter);
982
983 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
984 quiesce_rx(adapter);
985}
986
987static void schedule_chk_task(struct adapter *adap)
988{
989 unsigned int timeo;
990
991 timeo = adap->params.linkpoll_period ?
992 (HZ * adap->params.linkpoll_period) / 10 :
993 adap->params.stats_update_period * HZ;
994 if (timeo)
995 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
996}
997
998static int offload_open(struct net_device *dev)
999{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001000 struct port_info *pi = netdev_priv(dev);
1001 struct adapter *adapter = pi->adapter;
1002 struct t3cdev *tdev = dev2t3cdev(dev);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001003 int adap_up = adapter->open_device_map & PORT_MASK;
Denis Chengc54f5c22007-07-18 15:24:49 +08001004 int err;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001005
1006 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1007 return 0;
1008
1009 if (!adap_up && (err = cxgb_up(adapter)) < 0)
Divy Le Ray48c4b6d2008-05-06 19:25:56 -07001010 goto out;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001011
1012 t3_tp_set_offload_mode(adapter, 1);
1013 tdev->lldev = adapter->port[0];
1014 err = cxgb3_offload_activate(adapter);
1015 if (err)
1016 goto out;
1017
1018 init_port_mtus(adapter);
1019 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1020 adapter->params.b_wnd,
1021 adapter->params.rev == 0 ?
1022 adapter->port[0]->mtu : 0xffff);
1023 init_smt(adapter);
1024
Dan Noed96a51f2008-04-12 22:34:38 -04001025 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1026 dev_dbg(&dev->dev, "cannot create sysfs group\n");
Divy Le Ray4d22de32007-01-18 22:04:14 -05001027
1028 /* Call back all registered clients */
1029 cxgb3_add_clients(tdev);
1030
1031out:
1032 /* restore them in case the offload module has changed them */
1033 if (err) {
1034 t3_tp_set_offload_mode(adapter, 0);
1035 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1036 cxgb3_set_dummy_ops(tdev);
1037 }
1038 return err;
1039}
1040
1041static int offload_close(struct t3cdev *tdev)
1042{
1043 struct adapter *adapter = tdev2adap(tdev);
1044
1045 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1046 return 0;
1047
1048 /* Call back all registered clients */
1049 cxgb3_remove_clients(tdev);
1050
Divy Le Ray0ee8d332007-02-08 16:55:59 -08001051 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001052
1053 tdev->lldev = NULL;
1054 cxgb3_set_dummy_ops(tdev);
1055 t3_tp_set_offload_mode(adapter, 0);
1056 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1057
1058 if (!adapter->open_device_map)
1059 cxgb_down(adapter);
1060
1061 cxgb3_offload_deactivate(adapter);
1062 return 0;
1063}
1064
1065static int cxgb_open(struct net_device *dev)
1066{
Divy Le Ray4d22de32007-01-18 22:04:14 -05001067 struct port_info *pi = netdev_priv(dev);
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001068 struct adapter *adapter = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001069 int other_ports = adapter->open_device_map & PORT_MASK;
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001070 int err;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001071
Divy Le Ray48c4b6d2008-05-06 19:25:56 -07001072 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001073 return err;
1074
1075 set_bit(pi->port_id, &adapter->open_device_map);
Divy Le Ray8ac3ba62007-03-31 00:23:19 -07001076 if (is_offload(adapter) && !ofld_disable) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001077 err = offload_open(dev);
1078 if (err)
1079 printk(KERN_WARNING
1080 "Could not initialize offload capabilities\n");
1081 }
1082
1083 link_start(dev);
1084 t3_port_intr_enable(adapter, pi->port_id);
1085 netif_start_queue(dev);
1086 if (!other_ports)
1087 schedule_chk_task(adapter);
1088
1089 return 0;
1090}
1091
1092static int cxgb_close(struct net_device *dev)
1093{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001094 struct port_info *pi = netdev_priv(dev);
1095 struct adapter *adapter = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001096
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001097 t3_port_intr_disable(adapter, pi->port_id);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001098 netif_stop_queue(dev);
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001099 pi->phy.ops->power_down(&pi->phy, 1);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001100 netif_carrier_off(dev);
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001101 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001102
1103 spin_lock(&adapter->work_lock); /* sync with update task */
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001104 clear_bit(pi->port_id, &adapter->open_device_map);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001105 spin_unlock(&adapter->work_lock);
1106
1107 if (!(adapter->open_device_map & PORT_MASK))
1108 cancel_rearming_delayed_workqueue(cxgb3_wq,
1109 &adapter->adap_check_task);
1110
1111 if (!adapter->open_device_map)
1112 cxgb_down(adapter);
1113
1114 return 0;
1115}
1116
1117static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1118{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001119 struct port_info *pi = netdev_priv(dev);
1120 struct adapter *adapter = pi->adapter;
1121 struct net_device_stats *ns = &pi->netstats;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001122 const struct mac_stats *pstats;
1123
1124 spin_lock(&adapter->stats_lock);
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001125 pstats = t3_mac_update_stats(&pi->mac);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001126 spin_unlock(&adapter->stats_lock);
1127
1128 ns->tx_bytes = pstats->tx_octets;
1129 ns->tx_packets = pstats->tx_frames;
1130 ns->rx_bytes = pstats->rx_octets;
1131 ns->rx_packets = pstats->rx_frames;
1132 ns->multicast = pstats->rx_mcast_frames;
1133
1134 ns->tx_errors = pstats->tx_underrun;
1135 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1136 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1137 pstats->rx_fifo_ovfl;
1138
1139 /* detailed rx_errors */
1140 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1141 ns->rx_over_errors = 0;
1142 ns->rx_crc_errors = pstats->rx_fcs_errs;
1143 ns->rx_frame_errors = pstats->rx_symbol_errs;
1144 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1145 ns->rx_missed_errors = pstats->rx_cong_drops;
1146
1147 /* detailed tx_errors */
1148 ns->tx_aborted_errors = 0;
1149 ns->tx_carrier_errors = 0;
1150 ns->tx_fifo_errors = pstats->tx_underrun;
1151 ns->tx_heartbeat_errors = 0;
1152 ns->tx_window_errors = 0;
1153 return ns;
1154}
1155
1156static u32 get_msglevel(struct net_device *dev)
1157{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001158 struct port_info *pi = netdev_priv(dev);
1159 struct adapter *adapter = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001160
1161 return adapter->msg_enable;
1162}
1163
1164static void set_msglevel(struct net_device *dev, u32 val)
1165{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001166 struct port_info *pi = netdev_priv(dev);
1167 struct adapter *adapter = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001168
1169 adapter->msg_enable = val;
1170}
1171
1172static char stats_strings[][ETH_GSTRING_LEN] = {
1173 "TxOctetsOK ",
1174 "TxFramesOK ",
1175 "TxMulticastFramesOK",
1176 "TxBroadcastFramesOK",
1177 "TxPauseFrames ",
1178 "TxUnderrun ",
1179 "TxExtUnderrun ",
1180
1181 "TxFrames64 ",
1182 "TxFrames65To127 ",
1183 "TxFrames128To255 ",
1184 "TxFrames256To511 ",
1185 "TxFrames512To1023 ",
1186 "TxFrames1024To1518 ",
1187 "TxFrames1519ToMax ",
1188
1189 "RxOctetsOK ",
1190 "RxFramesOK ",
1191 "RxMulticastFramesOK",
1192 "RxBroadcastFramesOK",
1193 "RxPauseFrames ",
1194 "RxFCSErrors ",
1195 "RxSymbolErrors ",
1196 "RxShortErrors ",
1197 "RxJabberErrors ",
1198 "RxLengthErrors ",
1199 "RxFIFOoverflow ",
1200
1201 "RxFrames64 ",
1202 "RxFrames65To127 ",
1203 "RxFrames128To255 ",
1204 "RxFrames256To511 ",
1205 "RxFrames512To1023 ",
1206 "RxFrames1024To1518 ",
1207 "RxFrames1519ToMax ",
1208
1209 "PhyFIFOErrors ",
1210 "TSO ",
1211 "VLANextractions ",
1212 "VLANinsertions ",
1213 "TxCsumOffload ",
1214 "RxCsumGood ",
Divy Le Rayfc906642007-03-18 13:10:12 -07001215 "RxDrops ",
1216
1217 "CheckTXEnToggled ",
1218 "CheckResets ",
1219
Divy Le Ray4d22de32007-01-18 22:04:14 -05001220};
1221
Jeff Garzikb9f2c042007-10-03 18:07:32 -07001222static int get_sset_count(struct net_device *dev, int sset)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001223{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07001224 switch (sset) {
1225 case ETH_SS_STATS:
1226 return ARRAY_SIZE(stats_strings);
1227 default:
1228 return -EOPNOTSUPP;
1229 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001230}
1231
1232#define T3_REGMAP_SIZE (3 * 1024)
1233
1234static int get_regs_len(struct net_device *dev)
1235{
1236 return T3_REGMAP_SIZE;
1237}
1238
1239static int get_eeprom_len(struct net_device *dev)
1240{
1241 return EEPROMSIZE;
1242}
1243
1244static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1245{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001246 struct port_info *pi = netdev_priv(dev);
1247 struct adapter *adapter = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001248 u32 fw_vers = 0;
Divy Le Ray47330072007-08-29 19:15:52 -07001249 u32 tp_vers = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001250
1251 t3_get_fw_version(adapter, &fw_vers);
Divy Le Ray47330072007-08-29 19:15:52 -07001252 t3_get_tp_version(adapter, &tp_vers);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001253
1254 strcpy(info->driver, DRV_NAME);
1255 strcpy(info->version, DRV_VERSION);
1256 strcpy(info->bus_info, pci_name(adapter->pdev));
1257 if (!fw_vers)
1258 strcpy(info->fw_version, "N/A");
Divy Le Ray4aac3892007-01-30 19:43:45 -08001259 else {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001260 snprintf(info->fw_version, sizeof(info->fw_version),
Divy Le Ray47330072007-08-29 19:15:52 -07001261 "%s %u.%u.%u TP %u.%u.%u",
Divy Le Ray4aac3892007-01-30 19:43:45 -08001262 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1263 G_FW_VERSION_MAJOR(fw_vers),
1264 G_FW_VERSION_MINOR(fw_vers),
Divy Le Ray47330072007-08-29 19:15:52 -07001265 G_FW_VERSION_MICRO(fw_vers),
1266 G_TP_VERSION_MAJOR(tp_vers),
1267 G_TP_VERSION_MINOR(tp_vers),
1268 G_TP_VERSION_MICRO(tp_vers));
Divy Le Ray4aac3892007-01-30 19:43:45 -08001269 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001270}
1271
1272static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1273{
1274 if (stringset == ETH_SS_STATS)
1275 memcpy(data, stats_strings, sizeof(stats_strings));
1276}
1277
1278static unsigned long collect_sge_port_stats(struct adapter *adapter,
1279 struct port_info *p, int idx)
1280{
1281 int i;
1282 unsigned long tot = 0;
1283
1284 for (i = 0; i < p->nqsets; ++i)
1285 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1286 return tot;
1287}
1288
1289static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1290 u64 *data)
1291{
Divy Le Ray4d22de32007-01-18 22:04:14 -05001292 struct port_info *pi = netdev_priv(dev);
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001293 struct adapter *adapter = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001294 const struct mac_stats *s;
1295
1296 spin_lock(&adapter->stats_lock);
1297 s = t3_mac_update_stats(&pi->mac);
1298 spin_unlock(&adapter->stats_lock);
1299
1300 *data++ = s->tx_octets;
1301 *data++ = s->tx_frames;
1302 *data++ = s->tx_mcast_frames;
1303 *data++ = s->tx_bcast_frames;
1304 *data++ = s->tx_pause;
1305 *data++ = s->tx_underrun;
1306 *data++ = s->tx_fifo_urun;
1307
1308 *data++ = s->tx_frames_64;
1309 *data++ = s->tx_frames_65_127;
1310 *data++ = s->tx_frames_128_255;
1311 *data++ = s->tx_frames_256_511;
1312 *data++ = s->tx_frames_512_1023;
1313 *data++ = s->tx_frames_1024_1518;
1314 *data++ = s->tx_frames_1519_max;
1315
1316 *data++ = s->rx_octets;
1317 *data++ = s->rx_frames;
1318 *data++ = s->rx_mcast_frames;
1319 *data++ = s->rx_bcast_frames;
1320 *data++ = s->rx_pause;
1321 *data++ = s->rx_fcs_errs;
1322 *data++ = s->rx_symbol_errs;
1323 *data++ = s->rx_short;
1324 *data++ = s->rx_jabber;
1325 *data++ = s->rx_too_long;
1326 *data++ = s->rx_fifo_ovfl;
1327
1328 *data++ = s->rx_frames_64;
1329 *data++ = s->rx_frames_65_127;
1330 *data++ = s->rx_frames_128_255;
1331 *data++ = s->rx_frames_256_511;
1332 *data++ = s->rx_frames_512_1023;
1333 *data++ = s->rx_frames_1024_1518;
1334 *data++ = s->rx_frames_1519_max;
1335
1336 *data++ = pi->phy.fifo_errors;
1337
1338 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1339 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1340 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1341 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1342 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1343 *data++ = s->rx_cong_drops;
Divy Le Rayfc906642007-03-18 13:10:12 -07001344
1345 *data++ = s->num_toggled;
1346 *data++ = s->num_resets;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001347}
1348
1349static inline void reg_block_dump(struct adapter *ap, void *buf,
1350 unsigned int start, unsigned int end)
1351{
1352 u32 *p = buf + start;
1353
1354 for (; start <= end; start += sizeof(u32))
1355 *p++ = t3_read_reg(ap, start);
1356}
1357
1358static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1359 void *buf)
1360{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001361 struct port_info *pi = netdev_priv(dev);
1362 struct adapter *ap = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001363
1364 /*
1365 * Version scheme:
1366 * bits 0..9: chip version
1367 * bits 10..15: chip revision
1368 * bit 31: set for PCIe cards
1369 */
1370 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1371
1372 /*
1373 * We skip the MAC statistics registers because they are clear-on-read.
1374 * Also reading multi-register stats would need to synchronize with the
1375 * periodic mac stats accumulation. Hard to justify the complexity.
1376 */
1377 memset(buf, 0, T3_REGMAP_SIZE);
1378 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1379 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1380 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1381 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1382 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1383 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1384 XGM_REG(A_XGM_SERDES_STAT3, 1));
1385 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1386 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1387}
1388
1389static int restart_autoneg(struct net_device *dev)
1390{
1391 struct port_info *p = netdev_priv(dev);
1392
1393 if (!netif_running(dev))
1394 return -EAGAIN;
1395 if (p->link_config.autoneg != AUTONEG_ENABLE)
1396 return -EINVAL;
1397 p->phy.ops->autoneg_restart(&p->phy);
1398 return 0;
1399}
1400
1401static int cxgb3_phys_id(struct net_device *dev, u32 data)
1402{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001403 struct port_info *pi = netdev_priv(dev);
1404 struct adapter *adapter = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001405 int i;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001406
1407 if (data == 0)
1408 data = 2;
1409
1410 for (i = 0; i < data * 2; i++) {
1411 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1412 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1413 if (msleep_interruptible(500))
1414 break;
1415 }
1416 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1417 F_GPIO0_OUT_VAL);
1418 return 0;
1419}
1420
1421static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1422{
1423 struct port_info *p = netdev_priv(dev);
1424
1425 cmd->supported = p->link_config.supported;
1426 cmd->advertising = p->link_config.advertising;
1427
1428 if (netif_carrier_ok(dev)) {
1429 cmd->speed = p->link_config.speed;
1430 cmd->duplex = p->link_config.duplex;
1431 } else {
1432 cmd->speed = -1;
1433 cmd->duplex = -1;
1434 }
1435
1436 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1437 cmd->phy_address = p->phy.addr;
1438 cmd->transceiver = XCVR_EXTERNAL;
1439 cmd->autoneg = p->link_config.autoneg;
1440 cmd->maxtxpkt = 0;
1441 cmd->maxrxpkt = 0;
1442 return 0;
1443}
1444
1445static int speed_duplex_to_caps(int speed, int duplex)
1446{
1447 int cap = 0;
1448
1449 switch (speed) {
1450 case SPEED_10:
1451 if (duplex == DUPLEX_FULL)
1452 cap = SUPPORTED_10baseT_Full;
1453 else
1454 cap = SUPPORTED_10baseT_Half;
1455 break;
1456 case SPEED_100:
1457 if (duplex == DUPLEX_FULL)
1458 cap = SUPPORTED_100baseT_Full;
1459 else
1460 cap = SUPPORTED_100baseT_Half;
1461 break;
1462 case SPEED_1000:
1463 if (duplex == DUPLEX_FULL)
1464 cap = SUPPORTED_1000baseT_Full;
1465 else
1466 cap = SUPPORTED_1000baseT_Half;
1467 break;
1468 case SPEED_10000:
1469 if (duplex == DUPLEX_FULL)
1470 cap = SUPPORTED_10000baseT_Full;
1471 }
1472 return cap;
1473}
1474
1475#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1476 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1477 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1478 ADVERTISED_10000baseT_Full)
1479
1480static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1481{
1482 struct port_info *p = netdev_priv(dev);
1483 struct link_config *lc = &p->link_config;
1484
1485 if (!(lc->supported & SUPPORTED_Autoneg))
1486 return -EOPNOTSUPP; /* can't change speed/duplex */
1487
1488 if (cmd->autoneg == AUTONEG_DISABLE) {
1489 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1490
1491 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1492 return -EINVAL;
1493 lc->requested_speed = cmd->speed;
1494 lc->requested_duplex = cmd->duplex;
1495 lc->advertising = 0;
1496 } else {
1497 cmd->advertising &= ADVERTISED_MASK;
1498 cmd->advertising &= lc->supported;
1499 if (!cmd->advertising)
1500 return -EINVAL;
1501 lc->requested_speed = SPEED_INVALID;
1502 lc->requested_duplex = DUPLEX_INVALID;
1503 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1504 }
1505 lc->autoneg = cmd->autoneg;
1506 if (netif_running(dev))
1507 t3_link_start(&p->phy, &p->mac, lc);
1508 return 0;
1509}
1510
1511static void get_pauseparam(struct net_device *dev,
1512 struct ethtool_pauseparam *epause)
1513{
1514 struct port_info *p = netdev_priv(dev);
1515
1516 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1517 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1518 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1519}
1520
1521static int set_pauseparam(struct net_device *dev,
1522 struct ethtool_pauseparam *epause)
1523{
1524 struct port_info *p = netdev_priv(dev);
1525 struct link_config *lc = &p->link_config;
1526
1527 if (epause->autoneg == AUTONEG_DISABLE)
1528 lc->requested_fc = 0;
1529 else if (lc->supported & SUPPORTED_Autoneg)
1530 lc->requested_fc = PAUSE_AUTONEG;
1531 else
1532 return -EINVAL;
1533
1534 if (epause->rx_pause)
1535 lc->requested_fc |= PAUSE_RX;
1536 if (epause->tx_pause)
1537 lc->requested_fc |= PAUSE_TX;
1538 if (lc->autoneg == AUTONEG_ENABLE) {
1539 if (netif_running(dev))
1540 t3_link_start(&p->phy, &p->mac, lc);
1541 } else {
1542 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1543 if (netif_running(dev))
1544 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1545 }
1546 return 0;
1547}
1548
1549static u32 get_rx_csum(struct net_device *dev)
1550{
1551 struct port_info *p = netdev_priv(dev);
1552
1553 return p->rx_csum_offload;
1554}
1555
1556static int set_rx_csum(struct net_device *dev, u32 data)
1557{
1558 struct port_info *p = netdev_priv(dev);
1559
1560 p->rx_csum_offload = data;
1561 return 0;
1562}
1563
1564static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1565{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001566 struct port_info *pi = netdev_priv(dev);
1567 struct adapter *adapter = pi->adapter;
Divy Le Ray05b97b32007-03-18 13:10:01 -07001568 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
Divy Le Ray4d22de32007-01-18 22:04:14 -05001569
1570 e->rx_max_pending = MAX_RX_BUFFERS;
1571 e->rx_mini_max_pending = 0;
1572 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1573 e->tx_max_pending = MAX_TXQ_ENTRIES;
1574
Divy Le Ray05b97b32007-03-18 13:10:01 -07001575 e->rx_pending = q->fl_size;
1576 e->rx_mini_pending = q->rspq_size;
1577 e->rx_jumbo_pending = q->jumbo_size;
1578 e->tx_pending = q->txq_size[0];
Divy Le Ray4d22de32007-01-18 22:04:14 -05001579}
1580
1581static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1582{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001583 struct port_info *pi = netdev_priv(dev);
1584 struct adapter *adapter = pi->adapter;
Divy Le Ray05b97b32007-03-18 13:10:01 -07001585 struct qset_params *q;
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001586 int i;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001587
1588 if (e->rx_pending > MAX_RX_BUFFERS ||
1589 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1590 e->tx_pending > MAX_TXQ_ENTRIES ||
1591 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1592 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1593 e->rx_pending < MIN_FL_ENTRIES ||
1594 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1595 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1596 return -EINVAL;
1597
1598 if (adapter->flags & FULL_INIT_DONE)
1599 return -EBUSY;
1600
Divy Le Ray05b97b32007-03-18 13:10:01 -07001601 q = &adapter->params.sge.qset[pi->first_qset];
1602 for (i = 0; i < pi->nqsets; ++i, ++q) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001603 q->rspq_size = e->rx_mini_pending;
1604 q->fl_size = e->rx_pending;
1605 q->jumbo_size = e->rx_jumbo_pending;
1606 q->txq_size[0] = e->tx_pending;
1607 q->txq_size[1] = e->tx_pending;
1608 q->txq_size[2] = e->tx_pending;
1609 }
1610 return 0;
1611}
1612
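/*
 * Interrupt-coalescing get/set.  Note that both handlers below operate on
 * queue set 0 only; the remaining queue sets keep their current holdoff.
 */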
1613static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1614{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001615 struct port_info *pi = netdev_priv(dev);
1616 struct adapter *adapter = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001617 struct qset_params *qsp = &adapter->params.sge.qset[0];
1618 struct sge_qset *qs = &adapter->sge.qs[0];
1619
1620 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1621 return -EINVAL;
1622
1623 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1624 t3_update_qset_coalesce(qs, qsp);
1625 return 0;
1626}
1627
1628static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1629{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001630 struct port_info *pi = netdev_priv(dev);
1631 struct adapter *adapter = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001632 struct qset_params *q = adapter->params.sge.qset;
1633
1634 c->rx_coalesce_usecs = q->coalesce_usecs;
1635 return 0;
1636}
1637
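/*
 * Read a byte range of the serial EEPROM.  The device is accessed in 4-byte
 * words, so whole words are read into a scratch buffer and the requested
 * range is then copied out to the caller.
 */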
1638static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
 1639 u8 *data)
1640{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001641 struct port_info *pi = netdev_priv(dev);
1642 struct adapter *adapter = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001643 int i, err = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001644
1645 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1646 if (!buf)
1647 return -ENOMEM;
1648
1649 e->magic = EEPROM_MAGIC;
1650 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
Al Viro05e5c112007-12-22 18:56:23 +00001651 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001652
1653 if (!err)
1654 memcpy(data, buf + e->offset, e->len);
1655 kfree(buf);
1656 return err;
1657}
1658
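/*
 * Write a byte range of the serial EEPROM.  Writes are word based, so an
 * unaligned request is completed by reading the partial words at either end,
 * merging in the new data, and writing back the aligned range with write
 * protection temporarily lifted.
 */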
1659static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 1660 u8 *data)
1661{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001662 struct port_info *pi = netdev_priv(dev);
1663 struct adapter *adapter = pi->adapter;
Al Viro05e5c112007-12-22 18:56:23 +00001664 u32 aligned_offset, aligned_len;
1665 __le32 *p;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001666 u8 *buf;
Denis Chengc54f5c22007-07-18 15:24:49 +08001667 int err;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001668
1669 if (eeprom->magic != EEPROM_MAGIC)
1670 return -EINVAL;
1671
1672 aligned_offset = eeprom->offset & ~3;
1673 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1674
1675 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1676 buf = kmalloc(aligned_len, GFP_KERNEL);
1677 if (!buf)
1678 return -ENOMEM;
Al Viro05e5c112007-12-22 18:56:23 +00001679 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001680 if (!err && aligned_len > 4)
1681 err = t3_seeprom_read(adapter,
1682 aligned_offset + aligned_len - 4,
Al Viro05e5c112007-12-22 18:56:23 +00001683 (__le32 *)&buf[aligned_len - 4]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001684 if (err)
1685 goto out;
1686 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1687 } else
1688 buf = data;
1689
1690 err = t3_seeprom_wp(adapter, 0);
1691 if (err)
1692 goto out;
1693
Al Viro05e5c112007-12-22 18:56:23 +00001694 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001695 err = t3_seeprom_write(adapter, aligned_offset, *p);
1696 aligned_offset += 4;
1697 }
1698
1699 if (!err)
1700 err = t3_seeprom_wp(adapter, 1);
1701out:
1702 if (buf != data)
1703 kfree(buf);
1704 return err;
1705}
1706
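/* Wake-on-LAN is not supported; report no capabilities. */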
1707static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1708{
1709 wol->supported = 0;
1710 wol->wolopts = 0;
1711 memset(&wol->sopass, 0, sizeof(wol->sopass));
1712}
1713
1714static const struct ethtool_ops cxgb_ethtool_ops = {
1715 .get_settings = get_settings,
1716 .set_settings = set_settings,
1717 .get_drvinfo = get_drvinfo,
1718 .get_msglevel = get_msglevel,
1719 .set_msglevel = set_msglevel,
1720 .get_ringparam = get_sge_param,
1721 .set_ringparam = set_sge_param,
1722 .get_coalesce = get_coalesce,
1723 .set_coalesce = set_coalesce,
1724 .get_eeprom_len = get_eeprom_len,
1725 .get_eeprom = get_eeprom,
1726 .set_eeprom = set_eeprom,
1727 .get_pauseparam = get_pauseparam,
1728 .set_pauseparam = set_pauseparam,
1729 .get_rx_csum = get_rx_csum,
1730 .set_rx_csum = set_rx_csum,
Divy Le Ray4d22de32007-01-18 22:04:14 -05001731 .set_tx_csum = ethtool_op_set_tx_csum,
Divy Le Ray4d22de32007-01-18 22:04:14 -05001732 .set_sg = ethtool_op_set_sg,
1733 .get_link = ethtool_op_get_link,
1734 .get_strings = get_strings,
1735 .phys_id = cxgb3_phys_id,
1736 .nway_reset = restart_autoneg,
Jeff Garzikb9f2c042007-10-03 18:07:32 -07001737 .get_sset_count = get_sset_count,
Divy Le Ray4d22de32007-01-18 22:04:14 -05001738 .get_ethtool_stats = get_stats,
1739 .get_regs_len = get_regs_len,
1740 .get_regs = get_regs,
1741 .get_wol = get_wol,
Divy Le Ray4d22de32007-01-18 22:04:14 -05001742 .set_tso = ethtool_op_set_tso,
Divy Le Ray4d22de32007-01-18 22:04:14 -05001743};
1744
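/* A negative value means "parameter not supplied" and always passes the check. */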
1745static int in_range(int val, int lo, int hi)
1746{
1747 return val < 0 || (val <= hi && val >= lo);
1748}
1749
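/*
 * Handler for the Chelsio private SIOCCHIOCTL ioctl.  The first 32-bit word
 * of the user buffer selects the sub-command; the rest of the layout depends
 * on that command.  A rough, hypothetical user-space sketch (interface name
 * and error handling omitted):
 *
 *	struct ifreq ifr;
 *	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&edata;
 *	ioctl(sock_fd, SIOCCHIOCTL, &ifr);
 *
 * after which edata.val holds the port's queue set count.
 */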
1750static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1751{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001752 struct port_info *pi = netdev_priv(dev);
1753 struct adapter *adapter = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001754 u32 cmd;
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001755 int ret;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001756
1757 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1758 return -EFAULT;
1759
1760 switch (cmd) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001761 case CHELSIO_SET_QSET_PARAMS:{
1762 int i;
1763 struct qset_params *q;
1764 struct ch_qset_params t;
1765
1766 if (!capable(CAP_NET_ADMIN))
1767 return -EPERM;
1768 if (copy_from_user(&t, useraddr, sizeof(t)))
1769 return -EFAULT;
1770 if (t.qset_idx >= SGE_QSETS)
1771 return -EINVAL;
1772 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1773 !in_range(t.cong_thres, 0, 255) ||
1774 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1775 MAX_TXQ_ENTRIES) ||
1776 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1777 MAX_TXQ_ENTRIES) ||
1778 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1779 MAX_CTRL_TXQ_ENTRIES) ||
 1780 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
 1781 MAX_RX_BUFFERS) ||
 1782 !in_range(t.fl_size[1], MIN_FL_ENTRIES,
 1783 MAX_RX_JUMBO_BUFFERS) ||
 1784 !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
 1785 MAX_RSPQ_ENTRIES))
1786 return -EINVAL;
1787 if ((adapter->flags & FULL_INIT_DONE) &&
1788 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1789 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1790 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1791 t.polling >= 0 || t.cong_thres >= 0))
1792 return -EBUSY;
1793
1794 q = &adapter->params.sge.qset[t.qset_idx];
1795
1796 if (t.rspq_size >= 0)
1797 q->rspq_size = t.rspq_size;
1798 if (t.fl_size[0] >= 0)
1799 q->fl_size = t.fl_size[0];
1800 if (t.fl_size[1] >= 0)
1801 q->jumbo_size = t.fl_size[1];
1802 if (t.txq_size[0] >= 0)
1803 q->txq_size[0] = t.txq_size[0];
1804 if (t.txq_size[1] >= 0)
1805 q->txq_size[1] = t.txq_size[1];
1806 if (t.txq_size[2] >= 0)
1807 q->txq_size[2] = t.txq_size[2];
1808 if (t.cong_thres >= 0)
1809 q->cong_thres = t.cong_thres;
1810 if (t.intr_lat >= 0) {
1811 struct sge_qset *qs =
1812 &adapter->sge.qs[t.qset_idx];
1813
1814 q->coalesce_usecs = t.intr_lat;
1815 t3_update_qset_coalesce(qs, q);
1816 }
1817 if (t.polling >= 0) {
1818 if (adapter->flags & USING_MSIX)
1819 q->polling = t.polling;
1820 else {
1821 /* No polling with INTx for T3A */
1822 if (adapter->params.rev == 0 &&
1823 !(adapter->flags & USING_MSI))
1824 t.polling = 0;
1825
1826 for (i = 0; i < SGE_QSETS; i++) {
1827 q = &adapter->params.sge.
1828 qset[i];
1829 q->polling = t.polling;
1830 }
1831 }
1832 }
1833 break;
1834 }
1835 case CHELSIO_GET_QSET_PARAMS:{
1836 struct qset_params *q;
1837 struct ch_qset_params t;
1838
1839 if (copy_from_user(&t, useraddr, sizeof(t)))
1840 return -EFAULT;
1841 if (t.qset_idx >= SGE_QSETS)
1842 return -EINVAL;
1843
1844 q = &adapter->params.sge.qset[t.qset_idx];
1845 t.rspq_size = q->rspq_size;
1846 t.txq_size[0] = q->txq_size[0];
1847 t.txq_size[1] = q->txq_size[1];
1848 t.txq_size[2] = q->txq_size[2];
1849 t.fl_size[0] = q->fl_size;
1850 t.fl_size[1] = q->jumbo_size;
1851 t.polling = q->polling;
1852 t.intr_lat = q->coalesce_usecs;
1853 t.cong_thres = q->cong_thres;
1854
1855 if (copy_to_user(useraddr, &t, sizeof(t)))
1856 return -EFAULT;
1857 break;
1858 }
1859 case CHELSIO_SET_QSET_NUM:{
1860 struct ch_reg edata;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001861 unsigned int i, first_qset = 0, other_qsets = 0;
1862
1863 if (!capable(CAP_NET_ADMIN))
1864 return -EPERM;
1865 if (adapter->flags & FULL_INIT_DONE)
1866 return -EBUSY;
1867 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1868 return -EFAULT;
1869 if (edata.val < 1 ||
1870 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1871 return -EINVAL;
1872
1873 for_each_port(adapter, i)
1874 if (adapter->port[i] && adapter->port[i] != dev)
1875 other_qsets += adap2pinfo(adapter, i)->nqsets;
1876
1877 if (edata.val + other_qsets > SGE_QSETS)
1878 return -EINVAL;
1879
1880 pi->nqsets = edata.val;
1881
1882 for_each_port(adapter, i)
1883 if (adapter->port[i]) {
1884 pi = adap2pinfo(adapter, i);
1885 pi->first_qset = first_qset;
1886 first_qset += pi->nqsets;
1887 }
1888 break;
1889 }
1890 case CHELSIO_GET_QSET_NUM:{
1891 struct ch_reg edata;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001892
1893 edata.cmd = CHELSIO_GET_QSET_NUM;
1894 edata.val = pi->nqsets;
1895 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1896 return -EFAULT;
1897 break;
1898 }
1899 case CHELSIO_LOAD_FW:{
1900 u8 *fw_data;
1901 struct ch_mem_range t;
1902
Alan Cox1b3aa7a2008-04-29 14:29:30 +01001903 if (!capable(CAP_SYS_RAWIO))
Divy Le Ray4d22de32007-01-18 22:04:14 -05001904 return -EPERM;
1905 if (copy_from_user(&t, useraddr, sizeof(t)))
1906 return -EFAULT;
Alan Cox1b3aa7a2008-04-29 14:29:30 +01001907 /* XXX: t.len is user-controlled and not bounded before the kmalloc() below */
Divy Le Ray4d22de32007-01-18 22:04:14 -05001908 fw_data = kmalloc(t.len, GFP_KERNEL);
1909 if (!fw_data)
1910 return -ENOMEM;
1911
1912 if (copy_from_user
1913 (fw_data, useraddr + sizeof(t), t.len)) {
1914 kfree(fw_data);
1915 return -EFAULT;
1916 }
1917
1918 ret = t3_load_fw(adapter, fw_data, t.len);
1919 kfree(fw_data);
1920 if (ret)
1921 return ret;
1922 break;
1923 }
1924 case CHELSIO_SETMTUTAB:{
1925 struct ch_mtus m;
1926 int i;
1927
1928 if (!is_offload(adapter))
1929 return -EOPNOTSUPP;
1930 if (!capable(CAP_NET_ADMIN))
1931 return -EPERM;
1932 if (offload_running(adapter))
1933 return -EBUSY;
1934 if (copy_from_user(&m, useraddr, sizeof(m)))
1935 return -EFAULT;
1936 if (m.nmtus != NMTUS)
1937 return -EINVAL;
1938 if (m.mtus[0] < 81) /* accommodate SACK */
1939 return -EINVAL;
1940
1941 /* MTUs must be in ascending order */
1942 for (i = 1; i < NMTUS; ++i)
1943 if (m.mtus[i] < m.mtus[i - 1])
1944 return -EINVAL;
1945
1946 memcpy(adapter->params.mtus, m.mtus,
1947 sizeof(adapter->params.mtus));
1948 break;
1949 }
1950 case CHELSIO_GET_PM:{
1951 struct tp_params *p = &adapter->params.tp;
1952 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1953
1954 if (!is_offload(adapter))
1955 return -EOPNOTSUPP;
1956 m.tx_pg_sz = p->tx_pg_size;
1957 m.tx_num_pg = p->tx_num_pgs;
1958 m.rx_pg_sz = p->rx_pg_size;
1959 m.rx_num_pg = p->rx_num_pgs;
1960 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1961 if (copy_to_user(useraddr, &m, sizeof(m)))
1962 return -EFAULT;
1963 break;
1964 }
1965 case CHELSIO_SET_PM:{
1966 struct ch_pm m;
1967 struct tp_params *p = &adapter->params.tp;
1968
1969 if (!is_offload(adapter))
1970 return -EOPNOTSUPP;
1971 if (!capable(CAP_NET_ADMIN))
1972 return -EPERM;
1973 if (adapter->flags & FULL_INIT_DONE)
1974 return -EBUSY;
1975 if (copy_from_user(&m, useraddr, sizeof(m)))
1976 return -EFAULT;
vignesh babud9da4662007-07-09 11:50:22 -07001977 if (!is_power_of_2(m.rx_pg_sz) ||
1978 !is_power_of_2(m.tx_pg_sz))
Divy Le Ray4d22de32007-01-18 22:04:14 -05001979 return -EINVAL; /* not power of 2 */
1980 if (!(m.rx_pg_sz & 0x14000))
1981 return -EINVAL; /* not 16KB or 64KB */
1982 if (!(m.tx_pg_sz & 0x1554000))
 1983 return -EINVAL; /* not a power of 4 from 16KB to 16MB */
1984 if (m.tx_num_pg == -1)
1985 m.tx_num_pg = p->tx_num_pgs;
1986 if (m.rx_num_pg == -1)
1987 m.rx_num_pg = p->rx_num_pgs;
1988 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1989 return -EINVAL;
1990 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1991 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1992 return -EINVAL;
1993 p->rx_pg_size = m.rx_pg_sz;
1994 p->tx_pg_size = m.tx_pg_sz;
1995 p->rx_num_pgs = m.rx_num_pg;
1996 p->tx_num_pgs = m.tx_num_pg;
1997 break;
1998 }
1999 case CHELSIO_GET_MEM:{
2000 struct ch_mem_range t;
2001 struct mc7 *mem;
2002 u64 buf[32];
2003
2004 if (!is_offload(adapter))
2005 return -EOPNOTSUPP;
2006 if (!(adapter->flags & FULL_INIT_DONE))
2007 return -EIO; /* need the memory controllers */
2008 if (copy_from_user(&t, useraddr, sizeof(t)))
2009 return -EFAULT;
2010 if ((t.addr & 7) || (t.len & 7))
2011 return -EINVAL;
2012 if (t.mem_id == MEM_CM)
2013 mem = &adapter->cm;
2014 else if (t.mem_id == MEM_PMRX)
2015 mem = &adapter->pmrx;
2016 else if (t.mem_id == MEM_PMTX)
2017 mem = &adapter->pmtx;
2018 else
2019 return -EINVAL;
2020
2021 /*
Divy Le Ray18254942007-02-24 16:43:56 -08002022 * Version scheme:
2023 * bits 0..9: chip version
2024 * bits 10..15: chip revision
2025 */
Divy Le Ray4d22de32007-01-18 22:04:14 -05002026 t.version = 3 | (adapter->params.rev << 10);
2027 if (copy_to_user(useraddr, &t, sizeof(t)))
2028 return -EFAULT;
2029
2030 /*
2031 * Read 256 bytes at a time as len can be large and we don't
2032 * want to use huge intermediate buffers.
2033 */
2034 useraddr += sizeof(t); /* advance to start of buffer */
2035 while (t.len) {
2036 unsigned int chunk =
2037 min_t(unsigned int, t.len, sizeof(buf));
2038
2039 ret =
2040 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2041 buf);
2042 if (ret)
2043 return ret;
2044 if (copy_to_user(useraddr, buf, chunk))
2045 return -EFAULT;
2046 useraddr += chunk;
2047 t.addr += chunk;
2048 t.len -= chunk;
2049 }
2050 break;
2051 }
2052 case CHELSIO_SET_TRACE_FILTER:{
2053 struct ch_trace t;
2054 const struct trace_params *tp;
2055
2056 if (!capable(CAP_NET_ADMIN))
2057 return -EPERM;
2058 if (!offload_running(adapter))
2059 return -EAGAIN;
2060 if (copy_from_user(&t, useraddr, sizeof(t)))
2061 return -EFAULT;
2062
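 /* the cast below relies on the ch_trace fields from sip onward matching the layout of struct trace_params */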
2063 tp = (const struct trace_params *)&t.sip;
2064 if (t.config_tx)
2065 t3_config_trace_filter(adapter, tp, 0,
2066 t.invert_match,
2067 t.trace_tx);
2068 if (t.config_rx)
2069 t3_config_trace_filter(adapter, tp, 1,
2070 t.invert_match,
2071 t.trace_rx);
2072 break;
2073 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002074 default:
2075 return -EOPNOTSUPP;
2076 }
2077 return 0;
2078}
2079
2080static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2081{
Divy Le Ray4d22de32007-01-18 22:04:14 -05002082 struct mii_ioctl_data *data = if_mii(req);
Divy Le Ray5fbf8162007-08-29 19:15:47 -07002083 struct port_info *pi = netdev_priv(dev);
2084 struct adapter *adapter = pi->adapter;
2085 int ret, mmd;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002086
2087 switch (cmd) {
2088 case SIOCGMIIPHY:
2089 data->phy_id = pi->phy.addr;
2090 /* FALLTHRU */
2091 case SIOCGMIIREG:{
2092 u32 val;
2093 struct cphy *phy = &pi->phy;
2094
2095 if (!phy->mdio_read)
2096 return -EOPNOTSUPP;
2097 if (is_10G(adapter)) {
2098 mmd = data->phy_id >> 8;
2099 if (!mmd)
2100 mmd = MDIO_DEV_PCS;
2101 else if (mmd > MDIO_DEV_XGXS)
2102 return -EINVAL;
2103
2104 ret =
2105 phy->mdio_read(adapter, data->phy_id & 0x1f,
2106 mmd, data->reg_num, &val);
2107 } else
2108 ret =
2109 phy->mdio_read(adapter, data->phy_id & 0x1f,
2110 0, data->reg_num & 0x1f,
2111 &val);
2112 if (!ret)
2113 data->val_out = val;
2114 break;
2115 }
2116 case SIOCSMIIREG:{
2117 struct cphy *phy = &pi->phy;
2118
2119 if (!capable(CAP_NET_ADMIN))
2120 return -EPERM;
2121 if (!phy->mdio_write)
2122 return -EOPNOTSUPP;
2123 if (is_10G(adapter)) {
2124 mmd = data->phy_id >> 8;
2125 if (!mmd)
2126 mmd = MDIO_DEV_PCS;
2127 else if (mmd > MDIO_DEV_XGXS)
2128 return -EINVAL;
2129
2130 ret =
2131 phy->mdio_write(adapter,
2132 data->phy_id & 0x1f, mmd,
2133 data->reg_num,
2134 data->val_in);
2135 } else
2136 ret =
2137 phy->mdio_write(adapter,
2138 data->phy_id & 0x1f, 0,
2139 data->reg_num & 0x1f,
2140 data->val_in);
2141 break;
2142 }
2143 case SIOCCHIOCTL:
2144 return cxgb_extension_ioctl(dev, req->ifr_data);
2145 default:
2146 return -EOPNOTSUPP;
2147 }
2148 return ret;
2149}
2150
2151static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2152{
Divy Le Ray4d22de32007-01-18 22:04:14 -05002153 struct port_info *pi = netdev_priv(dev);
Divy Le Ray5fbf8162007-08-29 19:15:47 -07002154 struct adapter *adapter = pi->adapter;
2155 int ret;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002156
2157 if (new_mtu < 81) /* accommodate SACK */
2158 return -EINVAL;
2159 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2160 return ret;
2161 dev->mtu = new_mtu;
2162 init_port_mtus(adapter);
2163 if (adapter->params.rev == 0 && offload_running(adapter))
2164 t3_load_mtus(adapter, adapter->params.mtus,
2165 adapter->params.a_wnd, adapter->params.b_wnd,
2166 adapter->port[0]->mtu);
2167 return 0;
2168}
2169
2170static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2171{
Divy Le Ray4d22de32007-01-18 22:04:14 -05002172 struct port_info *pi = netdev_priv(dev);
Divy Le Ray5fbf8162007-08-29 19:15:47 -07002173 struct adapter *adapter = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002174 struct sockaddr *addr = p;
2175
2176 if (!is_valid_ether_addr(addr->sa_data))
2177 return -EINVAL;
2178
2179 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2180 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2181 if (offload_running(adapter))
2182 write_smt_entry(adapter, pi->port_id);
2183 return 0;
2184}
2185
2186/**
2187 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2188 * @adap: the adapter
2189 * @p: the port
2190 *
2191 * Ensures that current Rx processing on any of the queues associated with
2192 * the given port completes before returning. We do this by acquiring and
2193 * releasing the locks of the response queues associated with the port.
2194 */
2195static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2196{
2197 int i;
2198
2199 for (i = 0; i < p->nqsets; i++) {
2200 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2201
2202 spin_lock_irq(&q->lock);
2203 spin_unlock_irq(&q->lock);
2204 }
2205}
2206
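/*
 * Enable/disable HW VLAN extraction for a port.  Rev > 0 parts have per-port
 * control; rev 0 parts share a single global control across all ports.
 */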
2207static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2208{
Divy Le Ray4d22de32007-01-18 22:04:14 -05002209 struct port_info *pi = netdev_priv(dev);
Divy Le Ray5fbf8162007-08-29 19:15:47 -07002210 struct adapter *adapter = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002211
2212 pi->vlan_grp = grp;
2213 if (adapter->params.rev > 0)
2214 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2215 else {
2216 /* single control for all ports */
2217 unsigned int i, have_vlans = 0;
2218 for_each_port(adapter, i)
2219 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2220
2221 t3_set_vlan_accel(adapter, 1, have_vlans);
2222 }
2223 t3_synchronize_rx(adapter, pi);
2224}
2225
Divy Le Ray4d22de32007-01-18 22:04:14 -05002226#ifdef CONFIG_NET_POLL_CONTROLLER
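/* Poll all of this port's queue sets by invoking the normal interrupt handler. */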
2227static void cxgb_netpoll(struct net_device *dev)
2228{
Divy Le Ray890de332007-05-30 10:01:34 -07002229 struct port_info *pi = netdev_priv(dev);
Divy Le Ray5fbf8162007-08-29 19:15:47 -07002230 struct adapter *adapter = pi->adapter;
Divy Le Ray890de332007-05-30 10:01:34 -07002231 int qidx;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002232
Divy Le Ray890de332007-05-30 10:01:34 -07002233 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2234 struct sge_qset *qs = &adapter->sge.qs[qidx];
2235 void *source;
Jeff Garzik2eab17a2007-11-23 21:59:45 -05002236
Divy Le Ray890de332007-05-30 10:01:34 -07002237 if (adapter->flags & USING_MSIX)
2238 source = qs;
2239 else
2240 source = adapter;
2241
2242 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2243 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002244}
2245#endif
2246
2247/*
2248 * Periodic accumulation of MAC statistics.
2249 */
2250static void mac_stats_update(struct adapter *adapter)
2251{
2252 int i;
2253
2254 for_each_port(adapter, i) {
2255 struct net_device *dev = adapter->port[i];
2256 struct port_info *p = netdev_priv(dev);
2257
2258 if (netif_running(dev)) {
2259 spin_lock(&adapter->stats_lock);
2260 t3_mac_update_stats(&p->mac);
2261 spin_unlock(&adapter->stats_lock);
2262 }
2263 }
2264}
2265
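/* Poll link state for ports whose PHYs do not raise interrupts. */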
2266static void check_link_status(struct adapter *adapter)
2267{
2268 int i;
2269
2270 for_each_port(adapter, i) {
2271 struct net_device *dev = adapter->port[i];
2272 struct port_info *p = netdev_priv(dev);
2273
2274 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2275 t3_link_changed(adapter, i);
2276 }
2277}
2278
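/*
 * T3B2 MAC watchdog, run from the periodic check task.  A watchdog status of
 * 1 records that the MAC was toggled; a status of 2 means the MAC had to be
 * reprogrammed from scratch (MTU, address, rx mode, link) and is counted as
 * a reset.
 */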
Divy Le Rayfc906642007-03-18 13:10:12 -07002279static void check_t3b2_mac(struct adapter *adapter)
2280{
2281 int i;
2282
Divy Le Rayf2d961c2007-04-09 20:10:22 -07002283 if (!rtnl_trylock()) /* synchronize with ifdown */
2284 return;
2285
Divy Le Rayfc906642007-03-18 13:10:12 -07002286 for_each_port(adapter, i) {
2287 struct net_device *dev = adapter->port[i];
2288 struct port_info *p = netdev_priv(dev);
2289 int status;
2290
2291 if (!netif_running(dev))
2292 continue;
2293
2294 status = 0;
Divy Le Ray6d6daba2007-03-31 00:23:24 -07002295 if (netif_carrier_ok(dev))
Divy Le Rayfc906642007-03-18 13:10:12 -07002296 status = t3b2_mac_watchdog_task(&p->mac);
2297 if (status == 1)
2298 p->mac.stats.num_toggled++;
2299 else if (status == 2) {
2300 struct cmac *mac = &p->mac;
2301
2302 t3_mac_set_mtu(mac, dev->mtu);
2303 t3_mac_set_address(mac, 0, dev->dev_addr);
2304 cxgb_set_rxmode(dev);
2305 t3_link_start(&p->phy, mac, &p->link_config);
2306 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2307 t3_port_intr_enable(adapter, p->port_id);
2308 p->mac.stats.num_resets++;
2309 }
2310 }
2311 rtnl_unlock();
2312}
2313
2314
Divy Le Ray4d22de32007-01-18 22:04:14 -05002315static void t3_adap_check_task(struct work_struct *work)
2316{
2317 struct adapter *adapter = container_of(work, struct adapter,
2318 adap_check_task.work);
2319 const struct adapter_params *p = &adapter->params;
2320
2321 adapter->check_task_cnt++;
2322
2323 /* Check link status for PHYs without interrupts */
2324 if (p->linkpoll_period)
2325 check_link_status(adapter);
2326
2327 /* Accumulate MAC stats if needed */
2328 if (!p->linkpoll_period ||
2329 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2330 p->stats_update_period) {
2331 mac_stats_update(adapter);
2332 adapter->check_task_cnt = 0;
2333 }
2334
Divy Le Rayfc906642007-03-18 13:10:12 -07002335 if (p->rev == T3_REV_B2)
2336 check_t3b2_mac(adapter);
2337
Divy Le Ray4d22de32007-01-18 22:04:14 -05002338 /* Schedule the next check update if any port is active. */
2339 spin_lock(&adapter->work_lock);
2340 if (adapter->open_device_map & PORT_MASK)
2341 schedule_chk_task(adapter);
2342 spin_unlock(&adapter->work_lock);
2343}
2344
2345/*
2346 * Processes external (PHY) interrupts in process context.
2347 */
2348static void ext_intr_task(struct work_struct *work)
2349{
2350 struct adapter *adapter = container_of(work, struct adapter,
2351 ext_intr_handler_task);
2352
2353 t3_phy_intr_handler(adapter);
2354
2355 /* Now reenable external interrupts */
2356 spin_lock_irq(&adapter->work_lock);
2357 if (adapter->slow_intr_mask) {
2358 adapter->slow_intr_mask |= F_T3DBG;
2359 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2360 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2361 adapter->slow_intr_mask);
2362 }
2363 spin_unlock_irq(&adapter->work_lock);
2364}
2365
2366/*
2367 * Interrupt-context handler for external (PHY) interrupts.
2368 */
2369void t3_os_ext_intr_handler(struct adapter *adapter)
2370{
2371 /*
2372 * Schedule a task to handle external interrupts as they may be slow
2373 * and we use a mutex to protect MDIO registers. We disable PHY
2374 * interrupts in the meantime and let the task reenable them when
2375 * it's done.
2376 */
2377 spin_lock(&adapter->work_lock);
2378 if (adapter->slow_intr_mask) {
2379 adapter->slow_intr_mask &= ~F_T3DBG;
2380 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2381 adapter->slow_intr_mask);
2382 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2383 }
2384 spin_unlock(&adapter->work_lock);
2385}
2386
2387void t3_fatal_err(struct adapter *adapter)
2388{
2389 unsigned int fw_status[4];
2390
2391 if (adapter->flags & FULL_INIT_DONE) {
2392 t3_sge_stop(adapter);
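 /* stop the MAC Tx and Rx paths on both ports */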
Divy Le Rayc64c2ea2007-08-21 20:49:31 -07002393 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2394 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2395 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2396 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002397 t3_intr_disable(adapter);
2398 }
2399 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2400 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2401 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2402 fw_status[0], fw_status[1],
2403 fw_status[2], fw_status[3]);
2404
2405}
2406
Divy Le Ray91a6b502007-11-16 11:21:55 -08002407/**
2408 * t3_io_error_detected - called when PCI error is detected
2409 * @pdev: Pointer to PCI device
2410 * @state: The current pci connection state
2411 *
2412 * This function is called after a PCI bus error affecting
2413 * this device has been detected.
2414 */
2415static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2416 pci_channel_state_t state)
2417{
Divy Le Raybc4b6b52007-12-17 18:47:41 -08002418 struct adapter *adapter = pci_get_drvdata(pdev);
Divy Le Ray91a6b502007-11-16 11:21:55 -08002419 int i;
2420
2421 /* Stop all ports */
2422 for_each_port(adapter, i) {
2423 struct net_device *netdev = adapter->port[i];
2424
2425 if (netif_running(netdev))
2426 cxgb_close(netdev);
2427 }
2428
Jeff Garzik2eab17a2007-11-23 21:59:45 -05002429 if (is_offload(adapter) &&
Divy Le Ray91a6b502007-11-16 11:21:55 -08002430 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
2431 offload_close(&adapter->tdev);
2432
Divy Le Ray91a6b502007-11-16 11:21:55 -08002433 adapter->flags &= ~FULL_INIT_DONE;
2434
2435 pci_disable_device(pdev);
2436
Divy Le Ray48c4b6d2008-05-06 19:25:56 -07002437 /* Request a slot reset. */
Divy Le Ray91a6b502007-11-16 11:21:55 -08002438 return PCI_ERS_RESULT_NEED_RESET;
2439}
2440
2441/**
2442 * t3_io_slot_reset - called after the pci bus has been reset.
2443 * @pdev: Pointer to PCI device
2444 *
2445 * Restart the card from scratch, as if from a cold-boot.
2446 */
2447static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2448{
Divy Le Raybc4b6b52007-12-17 18:47:41 -08002449 struct adapter *adapter = pci_get_drvdata(pdev);
Divy Le Ray91a6b502007-11-16 11:21:55 -08002450
2451 if (pci_enable_device(pdev)) {
2452 dev_err(&pdev->dev,
2453 "Cannot re-enable PCI device after reset.\n");
Divy Le Ray48c4b6d2008-05-06 19:25:56 -07002454 goto err;
Divy Le Ray91a6b502007-11-16 11:21:55 -08002455 }
2456 pci_set_master(pdev);
Divy Le Ray204e2f92008-05-06 19:26:01 -07002457 pci_restore_state(pdev);
Divy Le Ray91a6b502007-11-16 11:21:55 -08002458
Divy Le Ray204e2f92008-05-06 19:26:01 -07002459 /* Free sge resources */
2460 t3_free_sge_resources(adapter);
2461
2462 if (t3_replay_prep_adapter(adapter))
Divy Le Ray48c4b6d2008-05-06 19:25:56 -07002463 goto err;
Divy Le Ray91a6b502007-11-16 11:21:55 -08002464
2465 return PCI_ERS_RESULT_RECOVERED;
Divy Le Ray48c4b6d2008-05-06 19:25:56 -07002466err:
2467 return PCI_ERS_RESULT_DISCONNECT;
Divy Le Ray91a6b502007-11-16 11:21:55 -08002468}
2469
2470/**
2471 * t3_io_resume - called when traffic can start flowing again.
2472 * @pdev: Pointer to PCI device
2473 *
2474 * This callback is called when the error recovery driver tells us that
 2475 * it's OK to resume normal operation.
2476 */
2477static void t3_io_resume(struct pci_dev *pdev)
2478{
Divy Le Raybc4b6b52007-12-17 18:47:41 -08002479 struct adapter *adapter = pci_get_drvdata(pdev);
Divy Le Ray91a6b502007-11-16 11:21:55 -08002480 int i;
2481
2482 /* Restart the ports */
2483 for_each_port(adapter, i) {
2484 struct net_device *netdev = adapter->port[i];
2485
2486 if (netif_running(netdev)) {
2487 if (cxgb_open(netdev)) {
2488 dev_err(&pdev->dev,
2489 "can't bring device back up"
2490 " after reset\n");
2491 continue;
2492 }
2493 netif_device_attach(netdev);
2494 }
2495 }
Divy Le Ray91a6b502007-11-16 11:21:55 -08002496}
2497
2498static struct pci_error_handlers t3_err_handler = {
2499 .error_detected = t3_io_error_detected,
2500 .slot_reset = t3_io_slot_reset,
2501 .resume = t3_io_resume,
2502};
2503
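/*
 * Try to allocate one MSI-X vector per queue set plus one for slow-path
 * (asynchronous) events.  If the full set cannot be obtained the caller
 * falls back to MSI or legacy interrupts.
 */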
Divy Le Ray4d22de32007-01-18 22:04:14 -05002504static int __devinit cxgb_enable_msix(struct adapter *adap)
2505{
2506 struct msix_entry entries[SGE_QSETS + 1];
2507 int i, err;
2508
2509 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2510 entries[i].entry = i;
2511
2512 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2513 if (!err) {
2514 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2515 adap->msix_info[i].vec = entries[i].vector;
2516 } else if (err > 0)
2517 dev_info(&adap->pdev->dev,
2518 "only %d MSI-X vectors left, not using MSI-X\n", err);
2519 return err;
2520}
2521
2522static void __devinit print_port_info(struct adapter *adap,
2523 const struct adapter_info *ai)
2524{
2525 static const char *pci_variant[] = {
2526 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2527 };
2528
2529 int i;
2530 char buf[80];
2531
2532 if (is_pcie(adap))
2533 snprintf(buf, sizeof(buf), "%s x%d",
2534 pci_variant[adap->params.pci.variant],
2535 adap->params.pci.width);
2536 else
2537 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2538 pci_variant[adap->params.pci.variant],
2539 adap->params.pci.speed, adap->params.pci.width);
2540
2541 for_each_port(adap, i) {
2542 struct net_device *dev = adap->port[i];
2543 const struct port_info *pi = netdev_priv(dev);
2544
2545 if (!test_bit(i, &adap->registered_device_map))
2546 continue;
Divy Le Ray8ac3ba62007-03-31 00:23:19 -07002547 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
Divy Le Ray4d22de32007-01-18 22:04:14 -05002548 dev->name, ai->desc, pi->port_type->desc,
Divy Le Ray8ac3ba62007-03-31 00:23:19 -07002549 is_offload(adap) ? "R" : "", adap->params.rev, buf,
Divy Le Ray4d22de32007-01-18 22:04:14 -05002550 (adap->flags & USING_MSIX) ? " MSI-X" :
2551 (adap->flags & USING_MSI) ? " MSI" : "");
2552 if (adap->name == dev->name && adap->params.vpd.mclk)
Divy Le Ray167cdf52007-08-21 20:49:36 -07002553 printk(KERN_INFO
2554 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
Divy Le Ray4d22de32007-01-18 22:04:14 -05002555 adap->name, t3_mc7_size(&adap->cm) >> 20,
2556 t3_mc7_size(&adap->pmtx) >> 20,
Divy Le Ray167cdf52007-08-21 20:49:36 -07002557 t3_mc7_size(&adap->pmrx) >> 20,
2558 adap->params.vpd.sn);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002559 }
2560}
2561
2562static int __devinit init_one(struct pci_dev *pdev,
2563 const struct pci_device_id *ent)
2564{
2565 static int version_printed;
2566
2567 int i, err, pci_using_dac = 0;
2568 unsigned long mmio_start, mmio_len;
2569 const struct adapter_info *ai;
2570 struct adapter *adapter = NULL;
2571 struct port_info *pi;
2572
2573 if (!version_printed) {
2574 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2575 ++version_printed;
2576 }
2577
2578 if (!cxgb3_wq) {
2579 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2580 if (!cxgb3_wq) {
2581 printk(KERN_ERR DRV_NAME
2582 ": cannot initialize work queue\n");
2583 return -ENOMEM;
2584 }
2585 }
2586
2587 err = pci_request_regions(pdev, DRV_NAME);
2588 if (err) {
2589 /* Just info, some other driver may have claimed the device. */
2590 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2591 return err;
2592 }
2593
2594 err = pci_enable_device(pdev);
2595 if (err) {
2596 dev_err(&pdev->dev, "cannot enable PCI device\n");
2597 goto out_release_regions;
2598 }
2599
2600 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2601 pci_using_dac = 1;
2602 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2603 if (err) {
2604 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2605 "coherent allocations\n");
2606 goto out_disable_device;
2607 }
2608 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2609 dev_err(&pdev->dev, "no usable DMA configuration\n");
2610 goto out_disable_device;
2611 }
2612
2613 pci_set_master(pdev);
Divy Le Ray204e2f92008-05-06 19:26:01 -07002614 pci_save_state(pdev);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002615
2616 mmio_start = pci_resource_start(pdev, 0);
2617 mmio_len = pci_resource_len(pdev, 0);
2618 ai = t3_get_adapter_info(ent->driver_data);
2619
2620 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2621 if (!adapter) {
2622 err = -ENOMEM;
2623 goto out_disable_device;
2624 }
2625
2626 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2627 if (!adapter->regs) {
2628 dev_err(&pdev->dev, "cannot map device registers\n");
2629 err = -ENOMEM;
2630 goto out_free_adapter;
2631 }
2632
2633 adapter->pdev = pdev;
2634 adapter->name = pci_name(pdev);
2635 adapter->msg_enable = dflt_msg_enable;
2636 adapter->mmio_len = mmio_len;
2637
2638 mutex_init(&adapter->mdio_lock);
2639 spin_lock_init(&adapter->work_lock);
2640 spin_lock_init(&adapter->stats_lock);
2641
2642 INIT_LIST_HEAD(&adapter->adapter_list);
2643 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2644 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2645
2646 for (i = 0; i < ai->nports; ++i) {
2647 struct net_device *netdev;
2648
2649 netdev = alloc_etherdev(sizeof(struct port_info));
2650 if (!netdev) {
2651 err = -ENOMEM;
2652 goto out_free_dev;
2653 }
2654
Divy Le Ray4d22de32007-01-18 22:04:14 -05002655 SET_NETDEV_DEV(netdev, &pdev->dev);
2656
2657 adapter->port[i] = netdev;
2658 pi = netdev_priv(netdev);
Divy Le Ray5fbf8162007-08-29 19:15:47 -07002659 pi->adapter = adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002660 pi->rx_csum_offload = 1;
2661 pi->nqsets = 1;
2662 pi->first_qset = i;
2663 pi->activity = 0;
2664 pi->port_id = i;
2665 netif_carrier_off(netdev);
2666 netdev->irq = pdev->irq;
2667 netdev->mem_start = mmio_start;
2668 netdev->mem_end = mmio_start + mmio_len - 1;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002669 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2670 netdev->features |= NETIF_F_LLTX;
2671 if (pci_using_dac)
2672 netdev->features |= NETIF_F_HIGHDMA;
2673
2674 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2675 netdev->vlan_rx_register = vlan_rx_register;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002676
2677 netdev->open = cxgb_open;
2678 netdev->stop = cxgb_close;
2679 netdev->hard_start_xmit = t3_eth_xmit;
2680 netdev->get_stats = cxgb_get_stats;
2681 netdev->set_multicast_list = cxgb_set_rxmode;
2682 netdev->do_ioctl = cxgb_ioctl;
2683 netdev->change_mtu = cxgb_change_mtu;
2684 netdev->set_mac_address = cxgb_set_mac_addr;
2685#ifdef CONFIG_NET_POLL_CONTROLLER
2686 netdev->poll_controller = cxgb_netpoll;
2687#endif
Divy Le Ray4d22de32007-01-18 22:04:14 -05002688
2689 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2690 }
2691
Divy Le Ray5fbf8162007-08-29 19:15:47 -07002692 pci_set_drvdata(pdev, adapter);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002693 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2694 err = -ENODEV;
2695 goto out_free_dev;
2696 }
Jeff Garzik2eab17a2007-11-23 21:59:45 -05002697
Divy Le Ray4d22de32007-01-18 22:04:14 -05002698 /*
2699 * The card is now ready to go. If any errors occur during device
2700 * registration we do not fail the whole card but rather proceed only
2701 * with the ports we manage to register successfully. However we must
2702 * register at least one net device.
2703 */
2704 for_each_port(adapter, i) {
2705 err = register_netdev(adapter->port[i]);
2706 if (err)
2707 dev_warn(&pdev->dev,
2708 "cannot register net device %s, skipping\n",
2709 adapter->port[i]->name);
2710 else {
2711 /*
2712 * Change the name we use for messages to the name of
2713 * the first successfully registered interface.
2714 */
2715 if (!adapter->registered_device_map)
2716 adapter->name = adapter->port[i]->name;
2717
2718 __set_bit(i, &adapter->registered_device_map);
2719 }
2720 }
2721 if (!adapter->registered_device_map) {
2722 dev_err(&pdev->dev, "could not register any net devices\n");
2723 goto out_free_dev;
2724 }
2725
2726 /* Driver's ready. Reflect it on LEDs */
2727 t3_led_ready(adapter);
2728
2729 if (is_offload(adapter)) {
2730 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2731 cxgb3_adapter_ofld(adapter);
2732 }
2733
2734 /* See what interrupts we'll be using */
2735 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2736 adapter->flags |= USING_MSIX;
2737 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2738 adapter->flags |= USING_MSI;
2739
Divy Le Ray0ee8d332007-02-08 16:55:59 -08002740 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
Divy Le Ray4d22de32007-01-18 22:04:14 -05002741 &cxgb3_attr_group);
2742
2743 print_port_info(adapter, ai);
2744 return 0;
2745
2746out_free_dev:
2747 iounmap(adapter->regs);
2748 for (i = ai->nports - 1; i >= 0; --i)
2749 if (adapter->port[i])
2750 free_netdev(adapter->port[i]);
2751
2752out_free_adapter:
2753 kfree(adapter);
2754
2755out_disable_device:
2756 pci_disable_device(pdev);
2757out_release_regions:
2758 pci_release_regions(pdev);
2759 pci_set_drvdata(pdev, NULL);
2760 return err;
2761}
2762
2763static void __devexit remove_one(struct pci_dev *pdev)
2764{
Divy Le Ray5fbf8162007-08-29 19:15:47 -07002765 struct adapter *adapter = pci_get_drvdata(pdev);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002766
Divy Le Ray5fbf8162007-08-29 19:15:47 -07002767 if (adapter) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05002768 int i;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002769
2770 t3_sge_stop(adapter);
Divy Le Ray0ee8d332007-02-08 16:55:59 -08002771 sysfs_remove_group(&adapter->port[0]->dev.kobj,
Divy Le Ray4d22de32007-01-18 22:04:14 -05002772 &cxgb3_attr_group);
2773
Divy Le Ray4d22de32007-01-18 22:04:14 -05002774 if (is_offload(adapter)) {
2775 cxgb3_adapter_unofld(adapter);
2776 if (test_bit(OFFLOAD_DEVMAP_BIT,
2777 &adapter->open_device_map))
2778 offload_close(&adapter->tdev);
2779 }
2780
Divy Le Ray67d92ab2007-11-16 11:21:50 -08002781 for_each_port(adapter, i)
2782 if (test_bit(i, &adapter->registered_device_map))
2783 unregister_netdev(adapter->port[i]);
2784
Divy Le Ray4d22de32007-01-18 22:04:14 -05002785 t3_free_sge_resources(adapter);
2786 cxgb_disable_msi(adapter);
2787
Divy Le Ray4d22de32007-01-18 22:04:14 -05002788 for_each_port(adapter, i)
2789 if (adapter->port[i])
2790 free_netdev(adapter->port[i]);
2791
2792 iounmap(adapter->regs);
2793 kfree(adapter);
2794 pci_release_regions(pdev);
2795 pci_disable_device(pdev);
2796 pci_set_drvdata(pdev, NULL);
2797 }
2798}
2799
2800static struct pci_driver driver = {
2801 .name = DRV_NAME,
2802 .id_table = cxgb3_pci_tbl,
2803 .probe = init_one,
2804 .remove = __devexit_p(remove_one),
Divy Le Ray91a6b502007-11-16 11:21:55 -08002805 .err_handler = &t3_err_handler,
Divy Le Ray4d22de32007-01-18 22:04:14 -05002806};
2807
2808static int __init cxgb3_init_module(void)
2809{
2810 int ret;
2811
2812 cxgb3_offload_init();
2813
2814 ret = pci_register_driver(&driver);
2815 return ret;
2816}
2817
2818static void __exit cxgb3_cleanup_module(void)
2819{
2820 pci_unregister_driver(&driver);
2821 if (cxgb3_wq)
2822 destroy_workqueue(cxgb3_wq);
2823}
2824
2825module_init(cxgb3_init_module);
2826module_exit(cxgb3_cleanup_module);