 1/*
2 * This file is part of the Chelsio T3 Ethernet driver for Linux.
3 *
4 * Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 */
11
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/init.h>
15#include <linux/pci.h>
16#include <linux/dma-mapping.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/if_vlan.h>
20#include <linux/mii.h>
21#include <linux/sockios.h>
22#include <linux/workqueue.h>
23#include <linux/proc_fs.h>
24#include <linux/rtnetlink.h>
25#include <asm/uaccess.h>
26
27#include "common.h"
28#include "cxgb3_ioctl.h"
29#include "regs.h"
30#include "cxgb3_offload.h"
31#include "version.h"
32
33#include "cxgb3_ctl_defs.h"
34#include "t3_cpl.h"
35#include "firmware_exports.h"
36
37enum {
38 MAX_TXQ_ENTRIES = 16384,
39 MAX_CTRL_TXQ_ENTRIES = 1024,
40 MAX_RSPQ_ENTRIES = 16384,
41 MAX_RX_BUFFERS = 16384,
42 MAX_RX_JUMBO_BUFFERS = 16384,
43 MIN_TXQ_ENTRIES = 4,
44 MIN_CTRL_TXQ_ENTRIES = 4,
45 MIN_RSPQ_ENTRIES = 32,
46 MIN_FL_ENTRIES = 32
47};
48
49#define PORT_MASK ((1 << MAX_NPORTS) - 1)
50
51#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
52 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
53 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
54
55#define EEPROM_MAGIC 0x38E2F10C
56
57#define to_net_dev(class) container_of(class, struct net_device, class_dev)
58
59#define CH_DEVICE(devid, ssid, idx) \
60 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
61
62static const struct pci_device_id cxgb3_pci_tbl[] = {
63 CH_DEVICE(0x20, 1, 0), /* PE9000 */
64 CH_DEVICE(0x21, 1, 1), /* T302E */
65 CH_DEVICE(0x22, 1, 2), /* T310E */
66 CH_DEVICE(0x23, 1, 3), /* T320X */
67 CH_DEVICE(0x24, 1, 1), /* T302X */
68 CH_DEVICE(0x25, 1, 3), /* T320E */
69 CH_DEVICE(0x26, 1, 2), /* T310X */
70 CH_DEVICE(0x30, 1, 2), /* T3B10 */
71 CH_DEVICE(0x31, 1, 3), /* T3B20 */
72 CH_DEVICE(0x32, 1, 1), /* T3B02 */
73 {0,}
74};
75
76MODULE_DESCRIPTION(DRV_DESC);
77MODULE_AUTHOR("Chelsio Communications");
78MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_VERSION);
80MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
81
82static int dflt_msg_enable = DFLT_MSG_ENABLE;
83
84module_param(dflt_msg_enable, int, 0644);
85MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
86
87/*
88 * The driver uses the best interrupt scheme available on a platform in the
89 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
90 * of these schemes the driver may consider as follows:
91 *
92 * msi = 2: choose from among all three options
93 * msi = 1: only consider MSI and pin interrupts
94 * msi = 0: force pin interrupts
95 */
96static int msi = 2;
97
98module_param(msi, int, 0644);
99MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
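/*
 * Example usage (assuming the driver is built as the cxgb3 module):
 *   modprobe cxgb3 msi=1   - allow MSI but not MSI-X
 *   modprobe cxgb3 msi=0   - force legacy pin interrupts
 */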
100
101/*
 102 * The driver enables offload by default.
 103 * To disable it, set ofld_disable = 1.
104 */
105
106static int ofld_disable = 0;
107
108module_param(ofld_disable, int, 0644);
 109MODULE_PARM_DESC(ofld_disable, "whether to disable offload at init time or not");
110
111/*
112 * We have work elements that we need to cancel when an interface is taken
113 * down. Normally the work elements would be executed by keventd but that
114 * can deadlock because of linkwatch. If our close method takes the rtnl
115 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
116 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
117 * for our work to complete. Get our own work queue to solve this.
118 */
119static struct workqueue_struct *cxgb3_wq;
120
121/**
122 * link_report - show link status and link speed/duplex
 123 * @dev: the port whose link settings are reported
124 *
125 * Shows the link status, speed, and duplex of a port.
126 */
127static void link_report(struct net_device *dev)
128{
129 if (!netif_carrier_ok(dev))
130 printk(KERN_INFO "%s: link down\n", dev->name);
131 else {
132 const char *s = "10Mbps";
133 const struct port_info *p = netdev_priv(dev);
134
135 switch (p->link_config.speed) {
136 case SPEED_10000:
137 s = "10Gbps";
138 break;
139 case SPEED_1000:
140 s = "1000Mbps";
141 break;
142 case SPEED_100:
143 s = "100Mbps";
144 break;
145 }
146
147 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
148 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
149 }
150}
151
152/**
153 * t3_os_link_changed - handle link status changes
154 * @adapter: the adapter associated with the link change
 155 * @port_id: the port index whose link status has changed
156 * @link_stat: the new status of the link
157 * @speed: the new speed setting
158 * @duplex: the new duplex setting
159 * @pause: the new flow-control setting
160 *
161 * This is the OS-dependent handler for link status changes. The OS
162 * neutral handler takes care of most of the processing for these events,
163 * then calls this handler for any OS-specific processing.
164 */
165void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
166 int speed, int duplex, int pause)
167{
168 struct net_device *dev = adapter->port[port_id];
169
170 /* Skip changes from disabled ports. */
171 if (!netif_running(dev))
172 return;
173
174 if (link_stat != netif_carrier_ok(dev)) {
175 if (link_stat)
176 netif_carrier_on(dev);
177 else
178 netif_carrier_off(dev);
179 link_report(dev);
180 }
181}
182
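/*
 * Reprogram the MAC's Rx mode (promiscuity and multicast filtering) from
 * the net_device's current settings.
 */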
183static void cxgb_set_rxmode(struct net_device *dev)
184{
185 struct t3_rx_mode rm;
186 struct port_info *pi = netdev_priv(dev);
187
188 init_rx_mode(&rm, dev, dev->mc_list);
189 t3_mac_set_rx_mode(&pi->mac, &rm);
190}
191
192/**
193 * link_start - enable a port
194 * @dev: the device to enable
195 *
196 * Performs the MAC and PHY actions needed to enable a port.
197 */
198static void link_start(struct net_device *dev)
199{
200 struct t3_rx_mode rm;
201 struct port_info *pi = netdev_priv(dev);
202 struct cmac *mac = &pi->mac;
203
204 init_rx_mode(&rm, dev, dev->mc_list);
205 t3_mac_reset(mac);
206 t3_mac_set_mtu(mac, dev->mtu);
207 t3_mac_set_address(mac, 0, dev->dev_addr);
208 t3_mac_set_rx_mode(mac, &rm);
209 t3_link_start(&pi->phy, mac, &pi->link_config);
210 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
211}
212
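/*
 * Release whichever message-signalled interrupt mode (MSI-X or MSI) the
 * adapter is currently using and clear the corresponding flag.
 */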
213static inline void cxgb_disable_msi(struct adapter *adapter)
214{
215 if (adapter->flags & USING_MSIX) {
216 pci_disable_msix(adapter->pdev);
217 adapter->flags &= ~USING_MSIX;
218 } else if (adapter->flags & USING_MSI) {
219 pci_disable_msi(adapter->pdev);
220 adapter->flags &= ~USING_MSI;
221 }
222}
223
224/*
225 * Interrupt handler for asynchronous events used with MSI-X.
226 */
227static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
228{
229 t3_slow_intr_handler(cookie);
230 return IRQ_HANDLED;
231}
232
233/*
234 * Name the MSI-X interrupts.
235 */
236static void name_msix_vecs(struct adapter *adap)
237{
238 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
239
240 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
241 adap->msix_info[0].desc[n] = 0;
242
243 for_each_port(adap, j) {
244 struct net_device *d = adap->port[j];
245 const struct port_info *pi = netdev_priv(d);
246
247 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
248 snprintf(adap->msix_info[msi_idx].desc, n,
249 "%s (queue %d)", d->name, i);
250 adap->msix_info[msi_idx].desc[n] = 0;
251 }
252 }
253}
254
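/*
 * Request an MSI-X vector for every SGE queue set.  Vector 0 is reserved
 * for asynchronous events (requested in cxgb_up); vectors 1..N serve the
 * data queues.  On failure, all vectors acquired so far are released.
 */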
255static int request_msix_data_irqs(struct adapter *adap)
256{
257 int i, j, err, qidx = 0;
258
259 for_each_port(adap, i) {
260 int nqsets = adap2pinfo(adap, i)->nqsets;
261
262 for (j = 0; j < nqsets; ++j) {
263 err = request_irq(adap->msix_info[qidx + 1].vec,
264 t3_intr_handler(adap,
265 adap->sge.qs[qidx].
266 rspq.polling), 0,
267 adap->msix_info[qidx + 1].desc,
268 &adap->sge.qs[qidx]);
269 if (err) {
270 while (--qidx >= 0)
271 free_irq(adap->msix_info[qidx + 1].vec,
272 &adap->sge.qs[qidx]);
273 return err;
274 }
275 qidx++;
276 }
277 }
278 return 0;
279}
280
281/**
282 * setup_rss - configure RSS
283 * @adap: the adapter
284 *
285 * Sets up RSS to distribute packets to multiple receive queues. We
286 * configure the RSS CPU lookup table to distribute to the number of HW
287 * receive queues, and the response queue lookup table to narrow that
288 * down to the response queues actually configured for each port.
289 * We always configure the RSS mapping for two ports since the mapping
290 * table has plenty of entries.
291 */
292static void setup_rss(struct adapter *adap)
293{
294 int i;
295 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
296 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
297 u8 cpus[SGE_QSETS + 1];
298 u16 rspq_map[RSS_TABLE_SIZE];
299
300 for (i = 0; i < SGE_QSETS; ++i)
301 cpus[i] = i;
302 cpus[SGE_QSETS] = 0xff; /* terminator */
303
304 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
305 rspq_map[i] = i % nq0;
306 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
307 }
308
309 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
310 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
311 V_RRCPLCPUSIZE(6), cpus, rspq_map);
312}
313
314/*
315 * If we have multiple receive queues per port serviced by NAPI we need one
316 * netdevice per queue as NAPI operates on netdevices. We already have one
317 * netdevice, namely the one associated with the interface, so we use dummy
318 * ones for any additional queues. Note that these netdevices exist purely
 319 * so that NAPI has something to work with; they do not represent network
320 * ports and are not registered.
321 */
322static int init_dummy_netdevs(struct adapter *adap)
323{
324 int i, j, dummy_idx = 0;
325 struct net_device *nd;
326
327 for_each_port(adap, i) {
328 struct net_device *dev = adap->port[i];
329 const struct port_info *pi = netdev_priv(dev);
330
331 for (j = 0; j < pi->nqsets - 1; j++) {
332 if (!adap->dummy_netdev[dummy_idx]) {
333 nd = alloc_netdev(0, "", ether_setup);
334 if (!nd)
335 goto free_all;
336
337 nd->priv = adap;
338 nd->weight = 64;
339 set_bit(__LINK_STATE_START, &nd->state);
340 adap->dummy_netdev[dummy_idx] = nd;
341 }
342 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
343 dummy_idx++;
344 }
345 }
346 return 0;
347
348free_all:
349 while (--dummy_idx >= 0) {
350 free_netdev(adap->dummy_netdev[dummy_idx]);
351 adap->dummy_netdev[dummy_idx] = NULL;
352 }
353 return -ENOMEM;
354}
355
356/*
357 * Wait until all NAPI handlers are descheduled. This includes the handlers of
358 * both netdevices representing interfaces and the dummy ones for the extra
359 * queues.
360 */
361static void quiesce_rx(struct adapter *adap)
362{
363 int i;
364 struct net_device *dev;
365
366 for_each_port(adap, i) {
367 dev = adap->port[i];
368 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
369 msleep(1);
370 }
371
372 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
373 dev = adap->dummy_netdev[i];
374 if (dev)
375 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
376 msleep(1);
377 }
378}
379
380/**
381 * setup_sge_qsets - configure SGE Tx/Rx/response queues
382 * @adap: the adapter
383 *
384 * Determines how many sets of SGE queues to use and initializes them.
385 * We support multiple queue sets per port if we have MSI-X, otherwise
386 * just one queue set per port.
387 */
388static int setup_sge_qsets(struct adapter *adap)
389{
390 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
391 unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
392
393 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
394 irq_idx = -1;
395
396 for_each_port(adap, i) {
397 struct net_device *dev = adap->port[i];
398 const struct port_info *pi = netdev_priv(dev);
399
400 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
401 err = t3_sge_alloc_qset(adap, qset_idx, 1,
402 (adap->flags & USING_MSIX) ? qset_idx + 1 :
403 irq_idx,
404 &adap->params.sge.qset[qset_idx], ntxq,
405 j == 0 ? dev :
406 adap-> dummy_netdev[dummy_dev_idx++]);
407 if (err) {
408 t3_free_sge_resources(adap);
409 return err;
410 }
411 }
412 }
413
414 return 0;
415}
416
417static ssize_t attr_show(struct class_device *cd, char *buf,
418 ssize_t(*format) (struct adapter *, char *))
419{
420 ssize_t len;
421 struct adapter *adap = to_net_dev(cd)->priv;
422
423 /* Synchronize with ioctls that may shut down the device */
424 rtnl_lock();
425 len = (*format) (adap, buf);
426 rtnl_unlock();
427 return len;
428}
429
430static ssize_t attr_store(struct class_device *cd, const char *buf, size_t len,
431 ssize_t(*set) (struct adapter *, unsigned int),
432 unsigned int min_val, unsigned int max_val)
433{
434 char *endp;
435 ssize_t ret;
436 unsigned int val;
437 struct adapter *adap = to_net_dev(cd)->priv;
438
439 if (!capable(CAP_NET_ADMIN))
440 return -EPERM;
441
442 val = simple_strtoul(buf, &endp, 0);
443 if (endp == buf || val < min_val || val > max_val)
444 return -EINVAL;
445
446 rtnl_lock();
447 ret = (*set) (adap, val);
448 if (!ret)
449 ret = len;
450 rtnl_unlock();
451 return ret;
452}
453
454#define CXGB3_SHOW(name, val_expr) \
455static ssize_t format_##name(struct adapter *adap, char *buf) \
456{ \
457 return sprintf(buf, "%u\n", val_expr); \
458} \
459static ssize_t show_##name(struct class_device *cd, char *buf) \
460{ \
461 return attr_show(cd, buf, format_##name); \
462}
463
464static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
465{
466 if (adap->flags & FULL_INIT_DONE)
467 return -EBUSY;
468 if (val && adap->params.rev == 0)
469 return -EINVAL;
470 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
471 return -EINVAL;
472 adap->params.mc5.nfilters = val;
473 return 0;
474}
475
476static ssize_t store_nfilters(struct class_device *cd, const char *buf,
477 size_t len)
478{
479 return attr_store(cd, buf, len, set_nfilters, 0, ~0);
480}
481
482static ssize_t set_nservers(struct adapter *adap, unsigned int val)
483{
484 if (adap->flags & FULL_INIT_DONE)
485 return -EBUSY;
486 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
487 return -EINVAL;
488 adap->params.mc5.nservers = val;
489 return 0;
490}
491
492static ssize_t store_nservers(struct class_device *cd, const char *buf,
493 size_t len)
494{
495 return attr_store(cd, buf, len, set_nservers, 0, ~0);
496}
497
498#define CXGB3_ATTR_R(name, val_expr) \
499CXGB3_SHOW(name, val_expr) \
500static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
501
502#define CXGB3_ATTR_RW(name, val_expr, store_method) \
503CXGB3_SHOW(name, val_expr) \
504static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
505
506CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
507CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
508CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
509
510static struct attribute *cxgb3_attrs[] = {
511 &class_device_attr_cam_size.attr,
512 &class_device_attr_nfilters.attr,
513 &class_device_attr_nservers.attr,
514 NULL
515};
516
517static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
518
519static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched)
520{
521 ssize_t len;
522 unsigned int v, addr, bpt, cpt;
523 struct adapter *adap = to_net_dev(cd)->priv;
524
525 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
526 rtnl_lock();
527 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
528 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
529 if (sched & 1)
530 v >>= 16;
531 bpt = (v >> 8) & 0xff;
532 cpt = v & 0xff;
533 if (!cpt)
534 len = sprintf(buf, "disabled\n");
535 else {
536 v = (adap->params.vpd.cclk * 1000) / cpt;
537 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
538 }
539 rtnl_unlock();
540 return len;
541}
542
543static ssize_t tm_attr_store(struct class_device *cd, const char *buf,
544 size_t len, int sched)
545{
546 char *endp;
547 ssize_t ret;
548 unsigned int val;
549 struct adapter *adap = to_net_dev(cd)->priv;
550
551 if (!capable(CAP_NET_ADMIN))
552 return -EPERM;
553
554 val = simple_strtoul(buf, &endp, 0);
555 if (endp == buf || val > 10000000)
556 return -EINVAL;
557
558 rtnl_lock();
559 ret = t3_config_sched(adap, val, sched);
560 if (!ret)
561 ret = len;
562 rtnl_unlock();
563 return ret;
564}
565
566#define TM_ATTR(name, sched) \
567static ssize_t show_##name(struct class_device *cd, char *buf) \
568{ \
569 return tm_attr_show(cd, buf, sched); \
570} \
571static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \
572{ \
573 return tm_attr_store(cd, buf, len, sched); \
574} \
575static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
576
577TM_ATTR(sched0, 0);
578TM_ATTR(sched1, 1);
579TM_ATTR(sched2, 2);
580TM_ATTR(sched3, 3);
581TM_ATTR(sched4, 4);
582TM_ATTR(sched5, 5);
583TM_ATTR(sched6, 6);
584TM_ATTR(sched7, 7);
585
586static struct attribute *offload_attrs[] = {
587 &class_device_attr_sched0.attr,
588 &class_device_attr_sched1.attr,
589 &class_device_attr_sched2.attr,
590 &class_device_attr_sched3.attr,
591 &class_device_attr_sched4.attr,
592 &class_device_attr_sched5.attr,
593 &class_device_attr_sched6.attr,
594 &class_device_attr_sched7.attr,
595 NULL
596};
597
598static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
599
600/*
601 * Sends an sk_buff to an offload queue driver
602 * after dealing with any active network taps.
603 */
604static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
605{
606 int ret;
607
608 local_bh_disable();
609 ret = t3_offload_tx(tdev, skb);
610 local_bh_enable();
611 return ret;
612}
613
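/*
 * Build and send a CPL_SMT_WRITE_REQ that programs the source MAC table
 * entry for the given port with that port's Ethernet address.
 */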
614static int write_smt_entry(struct adapter *adapter, int idx)
615{
616 struct cpl_smt_write_req *req;
617 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
618
619 if (!skb)
620 return -ENOMEM;
621
622 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
623 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
624 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
625 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
626 req->iff = idx;
627 memset(req->src_mac1, 0, sizeof(req->src_mac1));
628 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
629 skb->priority = 1;
630 offload_tx(&adapter->tdev, skb);
631 return 0;
632}
633
634static int init_smt(struct adapter *adapter)
635{
636 int i;
637
638 for_each_port(adapter, i)
639 write_smt_entry(adapter, i);
640 return 0;
641}
642
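/*
 * Write the per-port MTUs into the TP MTU port table, port 0 in the low
 * 16 bits and port 1 in the high 16 bits.
 */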
643static void init_port_mtus(struct adapter *adapter)
644{
645 unsigned int mtus = adapter->port[0]->mtu;
646
647 if (adapter->port[1])
648 mtus |= adapter->port[1]->mtu << 16;
649 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
650}
651
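/*
 * Send a firmware management work request that binds a Tx packet-scheduler
 * entry to a queue and port and sets its min/max scheduling parameters.
 */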
 652static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
653 int hi, int port)
654{
655 struct sk_buff *skb;
656 struct mngt_pktsched_wr *req;
657
658 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
659 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
660 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
661 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
662 req->sched = sched;
663 req->idx = qidx;
664 req->min = lo;
665 req->max = hi;
666 req->binding = port;
667 t3_mgmt_tx(adap, skb);
668}
669
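/*
 * Bind every SGE queue set to its port in the firmware packet scheduler.
 */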
670static void bind_qsets(struct adapter *adap)
671{
672 int i, j;
673
674 for_each_port(adap, i) {
675 const struct port_info *pi = adap2pinfo(adap, i);
676
677 for (j = 0; j < pi->nqsets; ++j)
678 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
679 -1, i);
680 }
681}
682
 683/**
684 * cxgb_up - enable the adapter
685 * @adapter: adapter being enabled
686 *
687 * Called when the first port is enabled, this function performs the
688 * actions necessary to make an adapter operational, such as completing
689 * the initialization of HW modules, and enabling interrupts.
690 *
691 * Must be called with the rtnl lock held.
692 */
693static int cxgb_up(struct adapter *adap)
694{
695 int err = 0;
696
697 if (!(adap->flags & FULL_INIT_DONE)) {
698 err = t3_check_fw_version(adap);
 699 if (err)
 700 goto out;
 701
702 err = init_dummy_netdevs(adap);
703 if (err)
704 goto out;
705
706 err = t3_init_hw(adap, 0);
707 if (err)
708 goto out;
709
710 err = setup_sge_qsets(adap);
711 if (err)
712 goto out;
713
714 setup_rss(adap);
715 adap->flags |= FULL_INIT_DONE;
716 }
717
718 t3_intr_clear(adap);
719
720 if (adap->flags & USING_MSIX) {
721 name_msix_vecs(adap);
722 err = request_irq(adap->msix_info[0].vec,
723 t3_async_intr_handler, 0,
724 adap->msix_info[0].desc, adap);
725 if (err)
726 goto irq_err;
727
 728 if ((err = request_msix_data_irqs(adap))) {
 729 free_irq(adap->msix_info[0].vec, adap);
 730 goto irq_err;
 731 }
732 } else if ((err = request_irq(adap->pdev->irq,
733 t3_intr_handler(adap,
734 adap->sge.qs[0].rspq.
735 polling),
736 (adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
737 adap->name, adap)))
738 goto irq_err;
739
740 t3_sge_start(adap);
741 t3_intr_enable(adap);
 742
743 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
744 bind_qsets(adap);
745 adap->flags |= QUEUES_BOUND;
746
 747out:
748 return err;
749irq_err:
750 CH_ERR(adap, "request_irq failed, err %d\n", err);
751 goto out;
752}
753
754/*
755 * Release resources when all the ports and offloading have been stopped.
756 */
757static void cxgb_down(struct adapter *adapter)
758{
759 t3_sge_stop(adapter);
760 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
761 t3_intr_disable(adapter);
762 spin_unlock_irq(&adapter->work_lock);
763
764 if (adapter->flags & USING_MSIX) {
765 int i, n = 0;
766
767 free_irq(adapter->msix_info[0].vec, adapter);
768 for_each_port(adapter, i)
769 n += adap2pinfo(adapter, i)->nqsets;
770
771 for (i = 0; i < n; ++i)
772 free_irq(adapter->msix_info[i + 1].vec,
773 &adapter->sge.qs[i]);
774 } else
775 free_irq(adapter->pdev->irq, adapter);
776
777 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
778 quiesce_rx(adapter);
779}
780
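/*
 * Schedule the periodic adapter check task on the driver's private
 * workqueue, using the link poll period when PHY interrupts are not
 * available and the statistics update period otherwise.
 */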
781static void schedule_chk_task(struct adapter *adap)
782{
783 unsigned int timeo;
784
785 timeo = adap->params.linkpoll_period ?
786 (HZ * adap->params.linkpoll_period) / 10 :
787 adap->params.stats_update_period * HZ;
788 if (timeo)
789 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
790}
791
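/*
 * Bring up the offload side of the adapter: bring the NIC up if no port has
 * done so yet, switch TP into offload mode, program the MTU and SMT tables,
 * create the scheduler sysfs attributes, and notify registered clients.
 */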
792static int offload_open(struct net_device *dev)
793{
794 struct adapter *adapter = dev->priv;
795 struct t3cdev *tdev = T3CDEV(dev);
796 int adap_up = adapter->open_device_map & PORT_MASK;
797 int err = 0;
798
799 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
800 return 0;
801
802 if (!adap_up && (err = cxgb_up(adapter)) < 0)
803 return err;
804
805 t3_tp_set_offload_mode(adapter, 1);
806 tdev->lldev = adapter->port[0];
807 err = cxgb3_offload_activate(adapter);
808 if (err)
809 goto out;
810
811 init_port_mtus(adapter);
812 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
813 adapter->params.b_wnd,
814 adapter->params.rev == 0 ?
815 adapter->port[0]->mtu : 0xffff);
816 init_smt(adapter);
817
818 /* Never mind if the next step fails */
819 sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
820
821 /* Call back all registered clients */
822 cxgb3_add_clients(tdev);
823
824out:
825 /* restore them in case the offload module has changed them */
826 if (err) {
827 t3_tp_set_offload_mode(adapter, 0);
828 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
829 cxgb3_set_dummy_ops(tdev);
830 }
831 return err;
832}
833
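/*
 * Shut down the offload side: notify clients, remove the sysfs attributes,
 * take TP out of offload mode, and bring the adapter down if no ports
 * remain open.
 */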
834static int offload_close(struct t3cdev *tdev)
835{
836 struct adapter *adapter = tdev2adap(tdev);
837
838 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
839 return 0;
840
841 /* Call back all registered clients */
842 cxgb3_remove_clients(tdev);
843
844 sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
845
846 tdev->lldev = NULL;
847 cxgb3_set_dummy_ops(tdev);
848 t3_tp_set_offload_mode(adapter, 0);
849 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
850
851 if (!adapter->open_device_map)
852 cxgb_down(adapter);
853
854 cxgb3_offload_deactivate(adapter);
855 return 0;
856}
857
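/*
 * net_device open handler: brings the adapter up on first use, marks the
 * port open, optionally initializes offload, and starts the link and the
 * Tx queue.
 */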
858static int cxgb_open(struct net_device *dev)
859{
860 int err;
861 struct adapter *adapter = dev->priv;
862 struct port_info *pi = netdev_priv(dev);
863 int other_ports = adapter->open_device_map & PORT_MASK;
864
865 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
866 return err;
867
868 set_bit(pi->port_id, &adapter->open_device_map);
869 if (!ofld_disable) {
870 err = offload_open(dev);
871 if (err)
872 printk(KERN_WARNING
873 "Could not initialize offload capabilities\n");
874 }
875
876 link_start(dev);
877 t3_port_intr_enable(adapter, pi->port_id);
878 netif_start_queue(dev);
879 if (!other_ports)
880 schedule_chk_task(adapter);
881
882 return 0;
883}
884
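/*
 * net_device stop handler: quiesces the port (interrupts, Tx queue, PHY,
 * MAC), removes it from the open device map, cancels the periodic check
 * task when the last port closes, and brings the adapter down when nothing
 * else is using it.
 */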
885static int cxgb_close(struct net_device *dev)
886{
887 struct adapter *adapter = dev->priv;
888 struct port_info *p = netdev_priv(dev);
889
890 t3_port_intr_disable(adapter, p->port_id);
891 netif_stop_queue(dev);
892 p->phy.ops->power_down(&p->phy, 1);
893 netif_carrier_off(dev);
894 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
895
896 spin_lock(&adapter->work_lock); /* sync with update task */
897 clear_bit(p->port_id, &adapter->open_device_map);
898 spin_unlock(&adapter->work_lock);
899
900 if (!(adapter->open_device_map & PORT_MASK))
901 cancel_rearming_delayed_workqueue(cxgb3_wq,
902 &adapter->adap_check_task);
903
904 if (!adapter->open_device_map)
905 cxgb_down(adapter);
906
907 return 0;
908}
909
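/*
 * Fill in net_device_stats from the accumulated MAC statistics, mapping the
 * hardware counters onto the standard fields.
 */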
910static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
911{
912 struct adapter *adapter = dev->priv;
913 struct port_info *p = netdev_priv(dev);
914 struct net_device_stats *ns = &p->netstats;
915 const struct mac_stats *pstats;
916
917 spin_lock(&adapter->stats_lock);
918 pstats = t3_mac_update_stats(&p->mac);
919 spin_unlock(&adapter->stats_lock);
920
921 ns->tx_bytes = pstats->tx_octets;
922 ns->tx_packets = pstats->tx_frames;
923 ns->rx_bytes = pstats->rx_octets;
924 ns->rx_packets = pstats->rx_frames;
925 ns->multicast = pstats->rx_mcast_frames;
926
927 ns->tx_errors = pstats->tx_underrun;
928 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
929 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
930 pstats->rx_fifo_ovfl;
931
932 /* detailed rx_errors */
933 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
934 ns->rx_over_errors = 0;
935 ns->rx_crc_errors = pstats->rx_fcs_errs;
936 ns->rx_frame_errors = pstats->rx_symbol_errs;
937 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
938 ns->rx_missed_errors = pstats->rx_cong_drops;
939
940 /* detailed tx_errors */
941 ns->tx_aborted_errors = 0;
942 ns->tx_carrier_errors = 0;
943 ns->tx_fifo_errors = pstats->tx_underrun;
944 ns->tx_heartbeat_errors = 0;
945 ns->tx_window_errors = 0;
946 return ns;
947}
948
949static u32 get_msglevel(struct net_device *dev)
950{
951 struct adapter *adapter = dev->priv;
952
953 return adapter->msg_enable;
954}
955
956static void set_msglevel(struct net_device *dev, u32 val)
957{
958 struct adapter *adapter = dev->priv;
959
960 adapter->msg_enable = val;
961}
962
963static char stats_strings[][ETH_GSTRING_LEN] = {
964 "TxOctetsOK ",
965 "TxFramesOK ",
966 "TxMulticastFramesOK",
967 "TxBroadcastFramesOK",
968 "TxPauseFrames ",
969 "TxUnderrun ",
970 "TxExtUnderrun ",
971
972 "TxFrames64 ",
973 "TxFrames65To127 ",
974 "TxFrames128To255 ",
975 "TxFrames256To511 ",
976 "TxFrames512To1023 ",
977 "TxFrames1024To1518 ",
978 "TxFrames1519ToMax ",
979
980 "RxOctetsOK ",
981 "RxFramesOK ",
982 "RxMulticastFramesOK",
983 "RxBroadcastFramesOK",
984 "RxPauseFrames ",
985 "RxFCSErrors ",
986 "RxSymbolErrors ",
987 "RxShortErrors ",
988 "RxJabberErrors ",
989 "RxLengthErrors ",
990 "RxFIFOoverflow ",
991
992 "RxFrames64 ",
993 "RxFrames65To127 ",
994 "RxFrames128To255 ",
995 "RxFrames256To511 ",
996 "RxFrames512To1023 ",
997 "RxFrames1024To1518 ",
998 "RxFrames1519ToMax ",
999
1000 "PhyFIFOErrors ",
1001 "TSO ",
1002 "VLANextractions ",
1003 "VLANinsertions ",
1004 "TxCsumOffload ",
1005 "RxCsumGood ",
1006 "RxDrops "
1007};
1008
1009static int get_stats_count(struct net_device *dev)
1010{
1011 return ARRAY_SIZE(stats_strings);
1012}
1013
1014#define T3_REGMAP_SIZE (3 * 1024)
1015
1016static int get_regs_len(struct net_device *dev)
1017{
1018 return T3_REGMAP_SIZE;
1019}
1020
1021static int get_eeprom_len(struct net_device *dev)
1022{
1023 return EEPROMSIZE;
1024}
1025
1026static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1027{
1028 u32 fw_vers = 0;
1029 struct adapter *adapter = dev->priv;
1030
1031 t3_get_fw_version(adapter, &fw_vers);
1032
1033 strcpy(info->driver, DRV_NAME);
1034 strcpy(info->version, DRV_VERSION);
1035 strcpy(info->bus_info, pci_name(adapter->pdev));
1036 if (!fw_vers)
1037 strcpy(info->fw_version, "N/A");
 1038 else {
 1039 snprintf(info->fw_version, sizeof(info->fw_version),
 1040 "%s %u.%u.%u",
1041 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1042 G_FW_VERSION_MAJOR(fw_vers),
1043 G_FW_VERSION_MINOR(fw_vers),
1044 G_FW_VERSION_MICRO(fw_vers));
1045 }
 1046}
1047
1048static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1049{
1050 if (stringset == ETH_SS_STATS)
1051 memcpy(data, stats_strings, sizeof(stats_strings));
1052}
1053
1054static unsigned long collect_sge_port_stats(struct adapter *adapter,
1055 struct port_info *p, int idx)
1056{
1057 int i;
1058 unsigned long tot = 0;
1059
1060 for (i = 0; i < p->nqsets; ++i)
1061 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1062 return tot;
1063}
1064
1065static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1066 u64 *data)
1067{
1068 struct adapter *adapter = dev->priv;
1069 struct port_info *pi = netdev_priv(dev);
1070 const struct mac_stats *s;
1071
1072 spin_lock(&adapter->stats_lock);
1073 s = t3_mac_update_stats(&pi->mac);
1074 spin_unlock(&adapter->stats_lock);
1075
1076 *data++ = s->tx_octets;
1077 *data++ = s->tx_frames;
1078 *data++ = s->tx_mcast_frames;
1079 *data++ = s->tx_bcast_frames;
1080 *data++ = s->tx_pause;
1081 *data++ = s->tx_underrun;
1082 *data++ = s->tx_fifo_urun;
1083
1084 *data++ = s->tx_frames_64;
1085 *data++ = s->tx_frames_65_127;
1086 *data++ = s->tx_frames_128_255;
1087 *data++ = s->tx_frames_256_511;
1088 *data++ = s->tx_frames_512_1023;
1089 *data++ = s->tx_frames_1024_1518;
1090 *data++ = s->tx_frames_1519_max;
1091
1092 *data++ = s->rx_octets;
1093 *data++ = s->rx_frames;
1094 *data++ = s->rx_mcast_frames;
1095 *data++ = s->rx_bcast_frames;
1096 *data++ = s->rx_pause;
1097 *data++ = s->rx_fcs_errs;
1098 *data++ = s->rx_symbol_errs;
1099 *data++ = s->rx_short;
1100 *data++ = s->rx_jabber;
1101 *data++ = s->rx_too_long;
1102 *data++ = s->rx_fifo_ovfl;
1103
1104 *data++ = s->rx_frames_64;
1105 *data++ = s->rx_frames_65_127;
1106 *data++ = s->rx_frames_128_255;
1107 *data++ = s->rx_frames_256_511;
1108 *data++ = s->rx_frames_512_1023;
1109 *data++ = s->rx_frames_1024_1518;
1110 *data++ = s->rx_frames_1519_max;
1111
1112 *data++ = pi->phy.fifo_errors;
1113
1114 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1115 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1116 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1117 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1118 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1119 *data++ = s->rx_cong_drops;
1120}
1121
1122static inline void reg_block_dump(struct adapter *ap, void *buf,
1123 unsigned int start, unsigned int end)
1124{
1125 u32 *p = buf + start;
1126
1127 for (; start <= end; start += sizeof(u32))
1128 *p++ = t3_read_reg(ap, start);
1129}
1130
1131static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1132 void *buf)
1133{
1134 struct adapter *ap = dev->priv;
1135
1136 /*
1137 * Version scheme:
1138 * bits 0..9: chip version
1139 * bits 10..15: chip revision
1140 * bit 31: set for PCIe cards
1141 */
1142 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1143
1144 /*
1145 * We skip the MAC statistics registers because they are clear-on-read.
1146 * Also reading multi-register stats would need to synchronize with the
1147 * periodic mac stats accumulation. Hard to justify the complexity.
1148 */
1149 memset(buf, 0, T3_REGMAP_SIZE);
1150 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1151 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1152 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1153 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1154 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1155 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1156 XGM_REG(A_XGM_SERDES_STAT3, 1));
1157 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1158 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1159}
1160
1161static int restart_autoneg(struct net_device *dev)
1162{
1163 struct port_info *p = netdev_priv(dev);
1164
1165 if (!netif_running(dev))
1166 return -EAGAIN;
1167 if (p->link_config.autoneg != AUTONEG_ENABLE)
1168 return -EINVAL;
1169 p->phy.ops->autoneg_restart(&p->phy);
1170 return 0;
1171}
1172
1173static int cxgb3_phys_id(struct net_device *dev, u32 data)
1174{
1175 int i;
1176 struct adapter *adapter = dev->priv;
1177
1178 if (data == 0)
1179 data = 2;
1180
1181 for (i = 0; i < data * 2; i++) {
1182 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1183 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1184 if (msleep_interruptible(500))
1185 break;
1186 }
1187 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1188 F_GPIO0_OUT_VAL);
1189 return 0;
1190}
1191
1192static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1193{
1194 struct port_info *p = netdev_priv(dev);
1195
1196 cmd->supported = p->link_config.supported;
1197 cmd->advertising = p->link_config.advertising;
1198
1199 if (netif_carrier_ok(dev)) {
1200 cmd->speed = p->link_config.speed;
1201 cmd->duplex = p->link_config.duplex;
1202 } else {
1203 cmd->speed = -1;
1204 cmd->duplex = -1;
1205 }
1206
1207 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1208 cmd->phy_address = p->phy.addr;
1209 cmd->transceiver = XCVR_EXTERNAL;
1210 cmd->autoneg = p->link_config.autoneg;
1211 cmd->maxtxpkt = 0;
1212 cmd->maxrxpkt = 0;
1213 return 0;
1214}
1215
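/*
 * Translate an ethtool speed/duplex pair into the corresponding SUPPORTED_*
 * capability bit, or 0 if the combination is not representable.
 */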
1216static int speed_duplex_to_caps(int speed, int duplex)
1217{
1218 int cap = 0;
1219
1220 switch (speed) {
1221 case SPEED_10:
1222 if (duplex == DUPLEX_FULL)
1223 cap = SUPPORTED_10baseT_Full;
1224 else
1225 cap = SUPPORTED_10baseT_Half;
1226 break;
1227 case SPEED_100:
1228 if (duplex == DUPLEX_FULL)
1229 cap = SUPPORTED_100baseT_Full;
1230 else
1231 cap = SUPPORTED_100baseT_Half;
1232 break;
1233 case SPEED_1000:
1234 if (duplex == DUPLEX_FULL)
1235 cap = SUPPORTED_1000baseT_Full;
1236 else
1237 cap = SUPPORTED_1000baseT_Half;
1238 break;
1239 case SPEED_10000:
1240 if (duplex == DUPLEX_FULL)
1241 cap = SUPPORTED_10000baseT_Full;
1242 }
1243 return cap;
1244}
1245
1246#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1247 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1248 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1249 ADVERTISED_10000baseT_Full)
1250
1251static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1252{
1253 struct port_info *p = netdev_priv(dev);
1254 struct link_config *lc = &p->link_config;
1255
1256 if (!(lc->supported & SUPPORTED_Autoneg))
1257 return -EOPNOTSUPP; /* can't change speed/duplex */
1258
1259 if (cmd->autoneg == AUTONEG_DISABLE) {
1260 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1261
1262 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1263 return -EINVAL;
1264 lc->requested_speed = cmd->speed;
1265 lc->requested_duplex = cmd->duplex;
1266 lc->advertising = 0;
1267 } else {
1268 cmd->advertising &= ADVERTISED_MASK;
1269 cmd->advertising &= lc->supported;
1270 if (!cmd->advertising)
1271 return -EINVAL;
1272 lc->requested_speed = SPEED_INVALID;
1273 lc->requested_duplex = DUPLEX_INVALID;
1274 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1275 }
1276 lc->autoneg = cmd->autoneg;
1277 if (netif_running(dev))
1278 t3_link_start(&p->phy, &p->mac, lc);
1279 return 0;
1280}
1281
1282static void get_pauseparam(struct net_device *dev,
1283 struct ethtool_pauseparam *epause)
1284{
1285 struct port_info *p = netdev_priv(dev);
1286
1287 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1288 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1289 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1290}
1291
1292static int set_pauseparam(struct net_device *dev,
1293 struct ethtool_pauseparam *epause)
1294{
1295 struct port_info *p = netdev_priv(dev);
1296 struct link_config *lc = &p->link_config;
1297
1298 if (epause->autoneg == AUTONEG_DISABLE)
1299 lc->requested_fc = 0;
1300 else if (lc->supported & SUPPORTED_Autoneg)
1301 lc->requested_fc = PAUSE_AUTONEG;
1302 else
1303 return -EINVAL;
1304
1305 if (epause->rx_pause)
1306 lc->requested_fc |= PAUSE_RX;
1307 if (epause->tx_pause)
1308 lc->requested_fc |= PAUSE_TX;
1309 if (lc->autoneg == AUTONEG_ENABLE) {
1310 if (netif_running(dev))
1311 t3_link_start(&p->phy, &p->mac, lc);
1312 } else {
1313 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1314 if (netif_running(dev))
1315 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1316 }
1317 return 0;
1318}
1319
1320static u32 get_rx_csum(struct net_device *dev)
1321{
1322 struct port_info *p = netdev_priv(dev);
1323
1324 return p->rx_csum_offload;
1325}
1326
1327static int set_rx_csum(struct net_device *dev, u32 data)
1328{
1329 struct port_info *p = netdev_priv(dev);
1330
1331 p->rx_csum_offload = data;
1332 return 0;
1333}
1334
1335static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1336{
1337 struct adapter *adapter = dev->priv;
1338
1339 e->rx_max_pending = MAX_RX_BUFFERS;
1340 e->rx_mini_max_pending = 0;
1341 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1342 e->tx_max_pending = MAX_TXQ_ENTRIES;
1343
1344 e->rx_pending = adapter->params.sge.qset[0].fl_size;
1345 e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
1346 e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
1347 e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
1348}
1349
1350static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1351{
1352 int i;
1353 struct adapter *adapter = dev->priv;
1354
1355 if (e->rx_pending > MAX_RX_BUFFERS ||
1356 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1357 e->tx_pending > MAX_TXQ_ENTRIES ||
1358 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1359 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1360 e->rx_pending < MIN_FL_ENTRIES ||
1361 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1362 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1363 return -EINVAL;
1364
1365 if (adapter->flags & FULL_INIT_DONE)
1366 return -EBUSY;
1367
1368 for (i = 0; i < SGE_QSETS; ++i) {
1369 struct qset_params *q = &adapter->params.sge.qset[i];
1370
1371 q->rspq_size = e->rx_mini_pending;
1372 q->fl_size = e->rx_pending;
1373 q->jumbo_size = e->rx_jumbo_pending;
1374 q->txq_size[0] = e->tx_pending;
1375 q->txq_size[1] = e->tx_pending;
1376 q->txq_size[2] = e->tx_pending;
1377 }
1378 return 0;
1379}
1380
1381static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1382{
1383 struct adapter *adapter = dev->priv;
1384 struct qset_params *qsp = &adapter->params.sge.qset[0];
1385 struct sge_qset *qs = &adapter->sge.qs[0];
1386
1387 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1388 return -EINVAL;
1389
1390 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1391 t3_update_qset_coalesce(qs, qsp);
1392 return 0;
1393}
1394
1395static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1396{
1397 struct adapter *adapter = dev->priv;
1398 struct qset_params *q = adapter->params.sge.qset;
1399
1400 c->rx_coalesce_usecs = q->coalesce_usecs;
1401 return 0;
1402}
1403
1404static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1405 u8 * data)
1406{
1407 int i, err = 0;
1408 struct adapter *adapter = dev->priv;
1409
1410 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1411 if (!buf)
1412 return -ENOMEM;
1413
1414 e->magic = EEPROM_MAGIC;
1415 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1416 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1417
1418 if (!err)
1419 memcpy(data, buf + e->offset, e->len);
1420 kfree(buf);
1421 return err;
1422}
1423
1424static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1425 u8 * data)
1426{
1427 u8 *buf;
1428 int err = 0;
1429 u32 aligned_offset, aligned_len, *p;
1430 struct adapter *adapter = dev->priv;
1431
1432 if (eeprom->magic != EEPROM_MAGIC)
1433 return -EINVAL;
1434
1435 aligned_offset = eeprom->offset & ~3;
1436 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1437
1438 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1439 buf = kmalloc(aligned_len, GFP_KERNEL);
1440 if (!buf)
1441 return -ENOMEM;
1442 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1443 if (!err && aligned_len > 4)
1444 err = t3_seeprom_read(adapter,
1445 aligned_offset + aligned_len - 4,
1446 (u32 *) & buf[aligned_len - 4]);
1447 if (err)
1448 goto out;
1449 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1450 } else
1451 buf = data;
1452
1453 err = t3_seeprom_wp(adapter, 0);
1454 if (err)
1455 goto out;
1456
1457 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1458 err = t3_seeprom_write(adapter, aligned_offset, *p);
1459 aligned_offset += 4;
1460 }
1461
1462 if (!err)
1463 err = t3_seeprom_wp(adapter, 1);
1464out:
1465 if (buf != data)
1466 kfree(buf);
1467 return err;
1468}
1469
1470static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1471{
1472 wol->supported = 0;
1473 wol->wolopts = 0;
1474 memset(&wol->sopass, 0, sizeof(wol->sopass));
1475}
1476
1477static const struct ethtool_ops cxgb_ethtool_ops = {
1478 .get_settings = get_settings,
1479 .set_settings = set_settings,
1480 .get_drvinfo = get_drvinfo,
1481 .get_msglevel = get_msglevel,
1482 .set_msglevel = set_msglevel,
1483 .get_ringparam = get_sge_param,
1484 .set_ringparam = set_sge_param,
1485 .get_coalesce = get_coalesce,
1486 .set_coalesce = set_coalesce,
1487 .get_eeprom_len = get_eeprom_len,
1488 .get_eeprom = get_eeprom,
1489 .set_eeprom = set_eeprom,
1490 .get_pauseparam = get_pauseparam,
1491 .set_pauseparam = set_pauseparam,
1492 .get_rx_csum = get_rx_csum,
1493 .set_rx_csum = set_rx_csum,
1494 .get_tx_csum = ethtool_op_get_tx_csum,
1495 .set_tx_csum = ethtool_op_set_tx_csum,
1496 .get_sg = ethtool_op_get_sg,
1497 .set_sg = ethtool_op_set_sg,
1498 .get_link = ethtool_op_get_link,
1499 .get_strings = get_strings,
1500 .phys_id = cxgb3_phys_id,
1501 .nway_reset = restart_autoneg,
1502 .get_stats_count = get_stats_count,
1503 .get_ethtool_stats = get_stats,
1504 .get_regs_len = get_regs_len,
1505 .get_regs = get_regs,
1506 .get_wol = get_wol,
1507 .get_tso = ethtool_op_get_tso,
1508 .set_tso = ethtool_op_set_tso,
1509 .get_perm_addr = ethtool_op_get_perm_addr
1510};
1511
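/*
 * Range check for the extension ioctl below: a negative value means
 * "leave unchanged" and is always accepted; otherwise the value must lie
 * within [lo, hi].
 */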
1512static int in_range(int val, int lo, int hi)
1513{
1514 return val < 0 || (val <= hi && val >= lo);
1515}
1516
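/*
 * Handler for the Chelsio-private SIOCCHIOCTL commands: register access,
 * queue set and scheduler configuration, firmware load, MTU/PM tables,
 * memory dumps, and trace filters.
 */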
1517static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1518{
1519 int ret;
1520 u32 cmd;
1521 struct adapter *adapter = dev->priv;
1522
1523 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1524 return -EFAULT;
1525
1526 switch (cmd) {
1527 case CHELSIO_SETREG:{
1528 struct ch_reg edata;
1529
1530 if (!capable(CAP_NET_ADMIN))
1531 return -EPERM;
1532 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1533 return -EFAULT;
1534 if ((edata.addr & 3) != 0
1535 || edata.addr >= adapter->mmio_len)
1536 return -EINVAL;
1537 writel(edata.val, adapter->regs + edata.addr);
1538 break;
1539 }
1540 case CHELSIO_GETREG:{
1541 struct ch_reg edata;
1542
1543 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1544 return -EFAULT;
1545 if ((edata.addr & 3) != 0
1546 || edata.addr >= adapter->mmio_len)
1547 return -EINVAL;
1548 edata.val = readl(adapter->regs + edata.addr);
1549 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1550 return -EFAULT;
1551 break;
1552 }
1553 case CHELSIO_SET_QSET_PARAMS:{
1554 int i;
1555 struct qset_params *q;
1556 struct ch_qset_params t;
1557
1558 if (!capable(CAP_NET_ADMIN))
1559 return -EPERM;
1560 if (copy_from_user(&t, useraddr, sizeof(t)))
1561 return -EFAULT;
1562 if (t.qset_idx >= SGE_QSETS)
1563 return -EINVAL;
1564 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1565 !in_range(t.cong_thres, 0, 255) ||
1566 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1567 MAX_TXQ_ENTRIES) ||
1568 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1569 MAX_TXQ_ENTRIES) ||
1570 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1571 MAX_CTRL_TXQ_ENTRIES) ||
1572 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1573 MAX_RX_BUFFERS)
1574 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1575 MAX_RX_JUMBO_BUFFERS)
1576 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1577 MAX_RSPQ_ENTRIES))
1578 return -EINVAL;
1579 if ((adapter->flags & FULL_INIT_DONE) &&
1580 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1581 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1582 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1583 t.polling >= 0 || t.cong_thres >= 0))
1584 return -EBUSY;
1585
1586 q = &adapter->params.sge.qset[t.qset_idx];
1587
1588 if (t.rspq_size >= 0)
1589 q->rspq_size = t.rspq_size;
1590 if (t.fl_size[0] >= 0)
1591 q->fl_size = t.fl_size[0];
1592 if (t.fl_size[1] >= 0)
1593 q->jumbo_size = t.fl_size[1];
1594 if (t.txq_size[0] >= 0)
1595 q->txq_size[0] = t.txq_size[0];
1596 if (t.txq_size[1] >= 0)
1597 q->txq_size[1] = t.txq_size[1];
1598 if (t.txq_size[2] >= 0)
1599 q->txq_size[2] = t.txq_size[2];
1600 if (t.cong_thres >= 0)
1601 q->cong_thres = t.cong_thres;
1602 if (t.intr_lat >= 0) {
1603 struct sge_qset *qs =
1604 &adapter->sge.qs[t.qset_idx];
1605
1606 q->coalesce_usecs = t.intr_lat;
1607 t3_update_qset_coalesce(qs, q);
1608 }
1609 if (t.polling >= 0) {
1610 if (adapter->flags & USING_MSIX)
1611 q->polling = t.polling;
1612 else {
1613 /* No polling with INTx for T3A */
1614 if (adapter->params.rev == 0 &&
1615 !(adapter->flags & USING_MSI))
1616 t.polling = 0;
1617
1618 for (i = 0; i < SGE_QSETS; i++) {
1619 q = &adapter->params.sge.
1620 qset[i];
1621 q->polling = t.polling;
1622 }
1623 }
1624 }
1625 break;
1626 }
1627 case CHELSIO_GET_QSET_PARAMS:{
1628 struct qset_params *q;
1629 struct ch_qset_params t;
1630
1631 if (copy_from_user(&t, useraddr, sizeof(t)))
1632 return -EFAULT;
1633 if (t.qset_idx >= SGE_QSETS)
1634 return -EINVAL;
1635
1636 q = &adapter->params.sge.qset[t.qset_idx];
1637 t.rspq_size = q->rspq_size;
1638 t.txq_size[0] = q->txq_size[0];
1639 t.txq_size[1] = q->txq_size[1];
1640 t.txq_size[2] = q->txq_size[2];
1641 t.fl_size[0] = q->fl_size;
1642 t.fl_size[1] = q->jumbo_size;
1643 t.polling = q->polling;
1644 t.intr_lat = q->coalesce_usecs;
1645 t.cong_thres = q->cong_thres;
1646
1647 if (copy_to_user(useraddr, &t, sizeof(t)))
1648 return -EFAULT;
1649 break;
1650 }
1651 case CHELSIO_SET_QSET_NUM:{
1652 struct ch_reg edata;
1653 struct port_info *pi = netdev_priv(dev);
1654 unsigned int i, first_qset = 0, other_qsets = 0;
1655
1656 if (!capable(CAP_NET_ADMIN))
1657 return -EPERM;
1658 if (adapter->flags & FULL_INIT_DONE)
1659 return -EBUSY;
1660 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1661 return -EFAULT;
1662 if (edata.val < 1 ||
1663 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1664 return -EINVAL;
1665
1666 for_each_port(adapter, i)
1667 if (adapter->port[i] && adapter->port[i] != dev)
1668 other_qsets += adap2pinfo(adapter, i)->nqsets;
1669
1670 if (edata.val + other_qsets > SGE_QSETS)
1671 return -EINVAL;
1672
1673 pi->nqsets = edata.val;
1674
1675 for_each_port(adapter, i)
1676 if (adapter->port[i]) {
1677 pi = adap2pinfo(adapter, i);
1678 pi->first_qset = first_qset;
1679 first_qset += pi->nqsets;
1680 }
1681 break;
1682 }
1683 case CHELSIO_GET_QSET_NUM:{
1684 struct ch_reg edata;
1685 struct port_info *pi = netdev_priv(dev);
1686
1687 edata.cmd = CHELSIO_GET_QSET_NUM;
1688 edata.val = pi->nqsets;
1689 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1690 return -EFAULT;
1691 break;
1692 }
1693 case CHELSIO_LOAD_FW:{
1694 u8 *fw_data;
1695 struct ch_mem_range t;
1696
1697 if (!capable(CAP_NET_ADMIN))
1698 return -EPERM;
1699 if (copy_from_user(&t, useraddr, sizeof(t)))
1700 return -EFAULT;
1701
1702 fw_data = kmalloc(t.len, GFP_KERNEL);
1703 if (!fw_data)
1704 return -ENOMEM;
1705
1706 if (copy_from_user
1707 (fw_data, useraddr + sizeof(t), t.len)) {
1708 kfree(fw_data);
1709 return -EFAULT;
1710 }
1711
1712 ret = t3_load_fw(adapter, fw_data, t.len);
1713 kfree(fw_data);
1714 if (ret)
1715 return ret;
1716 break;
1717 }
1718 case CHELSIO_SETMTUTAB:{
1719 struct ch_mtus m;
1720 int i;
1721
1722 if (!is_offload(adapter))
1723 return -EOPNOTSUPP;
1724 if (!capable(CAP_NET_ADMIN))
1725 return -EPERM;
1726 if (offload_running(adapter))
1727 return -EBUSY;
1728 if (copy_from_user(&m, useraddr, sizeof(m)))
1729 return -EFAULT;
1730 if (m.nmtus != NMTUS)
1731 return -EINVAL;
1732 if (m.mtus[0] < 81) /* accommodate SACK */
1733 return -EINVAL;
1734
1735 /* MTUs must be in ascending order */
1736 for (i = 1; i < NMTUS; ++i)
1737 if (m.mtus[i] < m.mtus[i - 1])
1738 return -EINVAL;
1739
1740 memcpy(adapter->params.mtus, m.mtus,
1741 sizeof(adapter->params.mtus));
1742 break;
1743 }
1744 case CHELSIO_GET_PM:{
1745 struct tp_params *p = &adapter->params.tp;
1746 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1747
1748 if (!is_offload(adapter))
1749 return -EOPNOTSUPP;
1750 m.tx_pg_sz = p->tx_pg_size;
1751 m.tx_num_pg = p->tx_num_pgs;
1752 m.rx_pg_sz = p->rx_pg_size;
1753 m.rx_num_pg = p->rx_num_pgs;
1754 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1755 if (copy_to_user(useraddr, &m, sizeof(m)))
1756 return -EFAULT;
1757 break;
1758 }
1759 case CHELSIO_SET_PM:{
1760 struct ch_pm m;
1761 struct tp_params *p = &adapter->params.tp;
1762
1763 if (!is_offload(adapter))
1764 return -EOPNOTSUPP;
1765 if (!capable(CAP_NET_ADMIN))
1766 return -EPERM;
1767 if (adapter->flags & FULL_INIT_DONE)
1768 return -EBUSY;
1769 if (copy_from_user(&m, useraddr, sizeof(m)))
1770 return -EFAULT;
1771 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1772 !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1773 return -EINVAL; /* not power of 2 */
1774 if (!(m.rx_pg_sz & 0x14000))
1775 return -EINVAL; /* not 16KB or 64KB */
1776 if (!(m.tx_pg_sz & 0x1554000))
1777 return -EINVAL;
1778 if (m.tx_num_pg == -1)
1779 m.tx_num_pg = p->tx_num_pgs;
1780 if (m.rx_num_pg == -1)
1781 m.rx_num_pg = p->rx_num_pgs;
1782 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1783 return -EINVAL;
1784 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1785 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1786 return -EINVAL;
1787 p->rx_pg_size = m.rx_pg_sz;
1788 p->tx_pg_size = m.tx_pg_sz;
1789 p->rx_num_pgs = m.rx_num_pg;
1790 p->tx_num_pgs = m.tx_num_pg;
1791 break;
1792 }
1793 case CHELSIO_GET_MEM:{
1794 struct ch_mem_range t;
1795 struct mc7 *mem;
1796 u64 buf[32];
1797
1798 if (!is_offload(adapter))
1799 return -EOPNOTSUPP;
1800 if (!(adapter->flags & FULL_INIT_DONE))
1801 return -EIO; /* need the memory controllers */
1802 if (copy_from_user(&t, useraddr, sizeof(t)))
1803 return -EFAULT;
1804 if ((t.addr & 7) || (t.len & 7))
1805 return -EINVAL;
1806 if (t.mem_id == MEM_CM)
1807 mem = &adapter->cm;
1808 else if (t.mem_id == MEM_PMRX)
1809 mem = &adapter->pmrx;
1810 else if (t.mem_id == MEM_PMTX)
1811 mem = &adapter->pmtx;
1812 else
1813 return -EINVAL;
1814
1815 /*
1816 * Version scheme:
1817 * bits 0..9: chip version
1818 * bits 10..15: chip revision
1819 */
1820 t.version = 3 | (adapter->params.rev << 10);
1821 if (copy_to_user(useraddr, &t, sizeof(t)))
1822 return -EFAULT;
1823
1824 /*
1825 * Read 256 bytes at a time as len can be large and we don't
1826 * want to use huge intermediate buffers.
1827 */
1828 useraddr += sizeof(t); /* advance to start of buffer */
1829 while (t.len) {
1830 unsigned int chunk =
1831 min_t(unsigned int, t.len, sizeof(buf));
1832
1833 ret =
1834 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1835 buf);
1836 if (ret)
1837 return ret;
1838 if (copy_to_user(useraddr, buf, chunk))
1839 return -EFAULT;
1840 useraddr += chunk;
1841 t.addr += chunk;
1842 t.len -= chunk;
1843 }
1844 break;
1845 }
1846 case CHELSIO_SET_TRACE_FILTER:{
1847 struct ch_trace t;
1848 const struct trace_params *tp;
1849
1850 if (!capable(CAP_NET_ADMIN))
1851 return -EPERM;
1852 if (!offload_running(adapter))
1853 return -EAGAIN;
1854 if (copy_from_user(&t, useraddr, sizeof(t)))
1855 return -EFAULT;
1856
1857 tp = (const struct trace_params *)&t.sip;
1858 if (t.config_tx)
1859 t3_config_trace_filter(adapter, tp, 0,
1860 t.invert_match,
1861 t.trace_tx);
1862 if (t.config_rx)
1863 t3_config_trace_filter(adapter, tp, 1,
1864 t.invert_match,
1865 t.trace_rx);
1866 break;
1867 }
1868 case CHELSIO_SET_PKTSCHED:{
 1869 struct ch_pktsched_params p;
 1870
 1871 if (!capable(CAP_NET_ADMIN))
1872 return -EPERM;
1873 if (!adapter->open_device_map)
1874 return -EAGAIN; /* uP and SGE must be running */
 1875 if (copy_from_user(&p, useraddr, sizeof(p)))
 1876 return -EFAULT;
1877 send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
1878 p.binding);
 1879 break;
 1880
 1881 }
1882 default:
1883 return -EOPNOTSUPP;
1884 }
1885 return 0;
1886}
1887
1888static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1889{
1890 int ret, mmd;
1891 struct adapter *adapter = dev->priv;
1892 struct port_info *pi = netdev_priv(dev);
1893 struct mii_ioctl_data *data = if_mii(req);
1894
1895 switch (cmd) {
1896 case SIOCGMIIPHY:
1897 data->phy_id = pi->phy.addr;
1898 /* FALLTHRU */
1899 case SIOCGMIIREG:{
1900 u32 val;
1901 struct cphy *phy = &pi->phy;
1902
1903 if (!phy->mdio_read)
1904 return -EOPNOTSUPP;
1905 if (is_10G(adapter)) {
1906 mmd = data->phy_id >> 8;
1907 if (!mmd)
1908 mmd = MDIO_DEV_PCS;
1909 else if (mmd > MDIO_DEV_XGXS)
1910 return -EINVAL;
1911
1912 ret =
1913 phy->mdio_read(adapter, data->phy_id & 0x1f,
1914 mmd, data->reg_num, &val);
1915 } else
1916 ret =
1917 phy->mdio_read(adapter, data->phy_id & 0x1f,
1918 0, data->reg_num & 0x1f,
1919 &val);
1920 if (!ret)
1921 data->val_out = val;
1922 break;
1923 }
1924 case SIOCSMIIREG:{
1925 struct cphy *phy = &pi->phy;
1926
1927 if (!capable(CAP_NET_ADMIN))
1928 return -EPERM;
1929 if (!phy->mdio_write)
1930 return -EOPNOTSUPP;
1931 if (is_10G(adapter)) {
1932 mmd = data->phy_id >> 8;
1933 if (!mmd)
1934 mmd = MDIO_DEV_PCS;
1935 else if (mmd > MDIO_DEV_XGXS)
1936 return -EINVAL;
1937
1938 ret =
1939 phy->mdio_write(adapter,
1940 data->phy_id & 0x1f, mmd,
1941 data->reg_num,
1942 data->val_in);
1943 } else
1944 ret =
1945 phy->mdio_write(adapter,
1946 data->phy_id & 0x1f, 0,
1947 data->reg_num & 0x1f,
1948 data->val_in);
1949 break;
1950 }
1951 case SIOCCHIOCTL:
1952 return cxgb_extension_ioctl(dev, req->ifr_data);
1953 default:
1954 return -EOPNOTSUPP;
1955 }
1956 return ret;
1957}
1958
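/*
 * Change the port MTU: program the MAC, refresh the TP per-port MTU table,
 * and on rev 0 parts reload the path-MTU table capped at the port MTU while
 * offload is running.
 */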
1959static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1960{
1961 int ret;
1962 struct adapter *adapter = dev->priv;
1963 struct port_info *pi = netdev_priv(dev);
1964
1965 if (new_mtu < 81) /* accommodate SACK */
1966 return -EINVAL;
1967 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1968 return ret;
1969 dev->mtu = new_mtu;
1970 init_port_mtus(adapter);
1971 if (adapter->params.rev == 0 && offload_running(adapter))
1972 t3_load_mtus(adapter, adapter->params.mtus,
1973 adapter->params.a_wnd, adapter->params.b_wnd,
1974 adapter->port[0]->mtu);
1975 return 0;
1976}
1977
1978static int cxgb_set_mac_addr(struct net_device *dev, void *p)
1979{
1980 struct adapter *adapter = dev->priv;
1981 struct port_info *pi = netdev_priv(dev);
1982 struct sockaddr *addr = p;
1983
1984 if (!is_valid_ether_addr(addr->sa_data))
1985 return -EINVAL;
1986
1987 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1988 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
1989 if (offload_running(adapter))
1990 write_smt_entry(adapter, pi->port_id);
1991 return 0;
1992}
1993
1994/**
1995 * t3_synchronize_rx - wait for current Rx processing on a port to complete
1996 * @adap: the adapter
1997 * @p: the port
1998 *
1999 * Ensures that current Rx processing on any of the queues associated with
2000 * the given port completes before returning. We do this by acquiring and
2001 * releasing the locks of the response queues associated with the port.
2002 */
2003static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2004{
2005 int i;
2006
2007 for (i = 0; i < p->nqsets; i++) {
2008 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2009
2010 spin_lock_irq(&q->lock);
2011 spin_unlock_irq(&q->lock);
2012 }
2013}
2014
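/*
 * Enable or disable hardware VLAN acceleration for a port.  Rev > 0 parts
 * have a per-port control; rev 0 parts have a single global control, so it
 * is enabled whenever any port has a VLAN group attached.  Rx is then
 * synchronized so no packet is still being processed with the old setting.
 * (vlan_rx_kill_vid below is a no-op since no per-VID filter is kept.)
 */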
2015static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2016{
2017 struct adapter *adapter = dev->priv;
2018 struct port_info *pi = netdev_priv(dev);
2019
2020 pi->vlan_grp = grp;
2021 if (adapter->params.rev > 0)
2022 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2023 else {
2024 /* single control for all ports */
2025 unsigned int i, have_vlans = 0;
2026 for_each_port(adapter, i)
2027 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2028
2029 t3_set_vlan_accel(adapter, 1, have_vlans);
2030 }
2031 t3_synchronize_rx(adapter, pi);
2032}
2033
2034static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2035{
2036 /* nothing */
2037}
2038
2039#ifdef CONFIG_NET_POLL_CONTROLLER
2040static void cxgb_netpoll(struct net_device *dev)
2041{
2042 struct adapter *adapter = dev->priv;
2043 struct sge_qset *qs = dev2qset(dev);
2044
2045 t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2046 adapter);
2047}
2048#endif
2049
2050/*
2051 * Periodic accumulation of MAC statistics.
2052 */
2053static void mac_stats_update(struct adapter *adapter)
2054{
2055 int i;
2056
2057 for_each_port(adapter, i) {
2058 struct net_device *dev = adapter->port[i];
2059 struct port_info *p = netdev_priv(dev);
2060
2061 if (netif_running(dev)) {
2062 spin_lock(&adapter->stats_lock);
2063 t3_mac_update_stats(&p->mac);
2064 spin_unlock(&adapter->stats_lock);
2065 }
2066 }
2067}
2068
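/*
 * Poll link state on ports whose PHYs cannot signal link changes through
 * interrupts.
 */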
2069static void check_link_status(struct adapter *adapter)
2070{
2071 int i;
2072
2073 for_each_port(adapter, i) {
2074 struct net_device *dev = adapter->port[i];
2075 struct port_info *p = netdev_priv(dev);
2076
2077 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2078 t3_link_changed(adapter, i);
2079 }
2080}
2081
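/*
 * Periodic maintenance task: polls link state for interrupt-less PHYs,
 * accumulates MAC statistics roughly every stats_update_period, and
 * reschedules itself as long as any port is up.
 */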
2082static void t3_adap_check_task(struct work_struct *work)
2083{
2084 struct adapter *adapter = container_of(work, struct adapter,
2085 adap_check_task.work);
2086 const struct adapter_params *p = &adapter->params;
2087
2088 adapter->check_task_cnt++;
2089
2090 /* Check link status for PHYs without interrupts */
2091 if (p->linkpoll_period)
2092 check_link_status(adapter);
2093
2094 /* Accumulate MAC stats if needed */
2095 if (!p->linkpoll_period ||
2096 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2097 p->stats_update_period) {
2098 mac_stats_update(adapter);
2099 adapter->check_task_cnt = 0;
2100 }
2101
2102 /* Schedule the next periodic check if any port is active. */
2103 spin_lock(&adapter->work_lock);
2104 if (adapter->open_device_map & PORT_MASK)
2105 schedule_chk_task(adapter);
2106 spin_unlock(&adapter->work_lock);
2107}
2108
2109/*
2110 * Processes external (PHY) interrupts in process context.
2111 */
2112static void ext_intr_task(struct work_struct *work)
2113{
2114 struct adapter *adapter = container_of(work, struct adapter,
2115 ext_intr_handler_task);
2116
2117 t3_phy_intr_handler(adapter);
2118
2119 /* Now reenable external interrupts */
2120 spin_lock_irq(&adapter->work_lock);
2121 if (adapter->slow_intr_mask) {
2122 adapter->slow_intr_mask |= F_T3DBG;
2123 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2124 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2125 adapter->slow_intr_mask);
2126 }
2127 spin_unlock_irq(&adapter->work_lock);
2128}
2129
2130/*
2131 * Interrupt-context handler for external (PHY) interrupts.
2132 */
2133void t3_os_ext_intr_handler(struct adapter *adapter)
2134{
2135 /*
2136 * Schedule a task to handle external interrupts as they may be slow
2137 * and we use a mutex to protect MDIO registers. We disable PHY
2138 * interrupts in the meantime and let the task reenable them when
2139 * it's done.
2140 */
2141 spin_lock(&adapter->work_lock);
2142 if (adapter->slow_intr_mask) {
2143 adapter->slow_intr_mask &= ~F_T3DBG;
2144 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2145 adapter->slow_intr_mask);
2146 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2147 }
2148 spin_unlock(&adapter->work_lock);
2149}
2150
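/*
 * Handle a fatal hardware error: quiesce the SGE and disable interrupts if
 * the adapter was fully initialized, then log the firmware status words
 * read from the CIM control block for post-mortem analysis.
 */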
2151void t3_fatal_err(struct adapter *adapter)
2152{
2153 unsigned int fw_status[4];
2154
2155 if (adapter->flags & FULL_INIT_DONE) {
2156 t3_sge_stop(adapter);
2157 t3_intr_disable(adapter);
2158 }
2159 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2160 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2161 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2162 fw_status[0], fw_status[1],
2163 fw_status[2], fw_status[3]);
2164
2165}
2166
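/*
 * Try to enable MSI-X.  One vector is requested per SGE queue set plus one
 * for slow-path (non-data) interrupts; if the platform cannot provide the
 * full set the caller falls back to MSI or legacy interrupts.
 */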
2167static int __devinit cxgb_enable_msix(struct adapter *adap)
2168{
2169 struct msix_entry entries[SGE_QSETS + 1];
2170 int i, err;
2171
2172 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2173 entries[i].entry = i;
2174
2175 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2176 if (!err) {
2177 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2178 adap->msix_info[i].vec = entries[i].vector;
2179 } else if (err > 0)
2180 dev_info(&adap->pdev->dev,
2181 "only %d MSI-X vectors left, not using MSI-X\n", err);
2182 return err;
2183}
2184
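/*
 * Log a one-line summary for each registered port (board type, PHY, PCI
 * bus mode/width and interrupt scheme) and, once, the on-board CM/PMTX/PMRX
 * memory sizes.
 */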
2185static void __devinit print_port_info(struct adapter *adap,
2186 const struct adapter_info *ai)
2187{
2188 static const char *pci_variant[] = {
2189 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2190 };
2191
2192 int i;
2193 char buf[80];
2194
2195 if (is_pcie(adap))
2196 snprintf(buf, sizeof(buf), "%s x%d",
2197 pci_variant[adap->params.pci.variant],
2198 adap->params.pci.width);
2199 else
2200 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2201 pci_variant[adap->params.pci.variant],
2202 adap->params.pci.speed, adap->params.pci.width);
2203
2204 for_each_port(adap, i) {
2205 struct net_device *dev = adap->port[i];
2206 const struct port_info *pi = netdev_priv(dev);
2207
2208 if (!test_bit(i, &adap->registered_device_map))
2209 continue;
2210 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2211 dev->name, ai->desc, pi->port_type->desc,
2212 adap->params.rev, buf,
2213 (adap->flags & USING_MSIX) ? " MSI-X" :
2214 (adap->flags & USING_MSI) ? " MSI" : "");
2215 if (adap->name == dev->name && adap->params.vpd.mclk)
2216 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2217 adap->name, t3_mc7_size(&adap->cm) >> 20,
2218 t3_mc7_size(&adap->pmtx) >> 20,
2219 t3_mc7_size(&adap->pmrx) >> 20);
2220 }
2221}
2222
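/*
 * PCI probe.  Creates the shared workqueue on first use, claims the PCI
 * resources and DMA mask, maps BAR 0, allocates the adapter and one net
 * device per port, prepares the hardware, registers whatever net devices
 * it can, and finally picks an interrupt scheme according to the 'msi'
 * module parameter.
 */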
2223static int __devinit init_one(struct pci_dev *pdev,
2224 const struct pci_device_id *ent)
2225{
2226 static int version_printed;
2227
2228 int i, err, pci_using_dac = 0;
2229 unsigned long mmio_start, mmio_len;
2230 const struct adapter_info *ai;
2231 struct adapter *adapter = NULL;
2232 struct port_info *pi;
2233
2234 if (!version_printed) {
2235 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2236 ++version_printed;
2237 }
2238
2239 if (!cxgb3_wq) {
2240 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2241 if (!cxgb3_wq) {
2242 printk(KERN_ERR DRV_NAME
2243 ": cannot initialize work queue\n");
2244 return -ENOMEM;
2245 }
2246 }
2247
2248 err = pci_request_regions(pdev, DRV_NAME);
2249 if (err) {
2250 /* Just info, some other driver may have claimed the device. */
2251 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2252 return err;
2253 }
2254
2255 err = pci_enable_device(pdev);
2256 if (err) {
2257 dev_err(&pdev->dev, "cannot enable PCI device\n");
2258 goto out_release_regions;
2259 }
2260
2261 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2262 pci_using_dac = 1;
2263 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2264 if (err) {
2265 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2266 "coherent allocations\n");
2267 goto out_disable_device;
2268 }
2269 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2270 dev_err(&pdev->dev, "no usable DMA configuration\n");
2271 goto out_disable_device;
2272 }
2273
2274 pci_set_master(pdev);
2275
2276 mmio_start = pci_resource_start(pdev, 0);
2277 mmio_len = pci_resource_len(pdev, 0);
2278 ai = t3_get_adapter_info(ent->driver_data);
2279
2280 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2281 if (!adapter) {
2282 err = -ENOMEM;
2283 goto out_disable_device;
2284 }
2285
2286 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2287 if (!adapter->regs) {
2288 dev_err(&pdev->dev, "cannot map device registers\n");
2289 err = -ENOMEM;
2290 goto out_free_adapter;
2291 }
2292
2293 adapter->pdev = pdev;
2294 adapter->name = pci_name(pdev);
2295 adapter->msg_enable = dflt_msg_enable;
2296 adapter->mmio_len = mmio_len;
2297
2298 mutex_init(&adapter->mdio_lock);
2299 spin_lock_init(&adapter->work_lock);
2300 spin_lock_init(&adapter->stats_lock);
2301
2302 INIT_LIST_HEAD(&adapter->adapter_list);
2303 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2304 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2305
2306 for (i = 0; i < ai->nports; ++i) {
2307 struct net_device *netdev;
2308
2309 netdev = alloc_etherdev(sizeof(struct port_info));
2310 if (!netdev) {
2311 err = -ENOMEM;
2312 goto out_free_dev;
2313 }
2314
2315 SET_MODULE_OWNER(netdev);
2316 SET_NETDEV_DEV(netdev, &pdev->dev);
2317
2318 adapter->port[i] = netdev;
2319 pi = netdev_priv(netdev);
2320 pi->rx_csum_offload = 1;
2321 pi->nqsets = 1;
2322 pi->first_qset = i;
2323 pi->activity = 0;
2324 pi->port_id = i;
2325 netif_carrier_off(netdev);
2326 netdev->irq = pdev->irq;
2327 netdev->mem_start = mmio_start;
2328 netdev->mem_end = mmio_start + mmio_len - 1;
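 /*
  * netdev_priv() points at the port_info allocated with the net
  * device above; dev->priv is reused to point at the shared
  * adapter structure instead.
  */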
2329 netdev->priv = adapter;
2330 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2331 netdev->features |= NETIF_F_LLTX;
2332 if (pci_using_dac)
2333 netdev->features |= NETIF_F_HIGHDMA;
2334
2335 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2336 netdev->vlan_rx_register = vlan_rx_register;
2337 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2338
2339 netdev->open = cxgb_open;
2340 netdev->stop = cxgb_close;
2341 netdev->hard_start_xmit = t3_eth_xmit;
2342 netdev->get_stats = cxgb_get_stats;
2343 netdev->set_multicast_list = cxgb_set_rxmode;
2344 netdev->do_ioctl = cxgb_ioctl;
2345 netdev->change_mtu = cxgb_change_mtu;
2346 netdev->set_mac_address = cxgb_set_mac_addr;
2347#ifdef CONFIG_NET_POLL_CONTROLLER
2348 netdev->poll_controller = cxgb_netpoll;
2349#endif
2350 netdev->weight = 64;
2351
2352 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2353 }
2354
2355 pci_set_drvdata(pdev, adapter->port[0]);
2356 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2357 err = -ENODEV;
2358 goto out_free_dev;
2359 }
2360
2361 /*
2362 * The card is now ready to go. If any errors occur during device
2363 * registration, we do not fail the whole card but rather proceed only
2364 * with the ports we manage to register successfully. However, we must
2365 * register at least one net device.
2366 */
2367 for_each_port(adapter, i) {
2368 err = register_netdev(adapter->port[i]);
2369 if (err)
2370 dev_warn(&pdev->dev,
2371 "cannot register net device %s, skipping\n",
2372 adapter->port[i]->name);
2373 else {
2374 /*
2375 * Change the name we use for messages to the name of
2376 * the first successfully registered interface.
2377 */
2378 if (!adapter->registered_device_map)
2379 adapter->name = adapter->port[i]->name;
2380
2381 __set_bit(i, &adapter->registered_device_map);
2382 }
2383 }
2384 if (!adapter->registered_device_map) {
2385 dev_err(&pdev->dev, "could not register any net devices\n");
2386 goto out_free_dev;
2387 }
2388
2389 /* Driver's ready. Reflect it on LEDs */
2390 t3_led_ready(adapter);
2391
2392 if (is_offload(adapter)) {
2393 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2394 cxgb3_adapter_ofld(adapter);
2395 }
2396
2397 /* See what interrupts we'll be using */
2398 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2399 adapter->flags |= USING_MSIX;
2400 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2401 adapter->flags |= USING_MSI;
2402
2403 err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
2404 &cxgb3_attr_group);
 if (err)
 dev_warn(&pdev->dev, "cannot create sysfs attribute group\n");
2405
2406 print_port_info(adapter, ai);
2407 return 0;
2408
2409out_free_dev:
2410 iounmap(adapter->regs);
2411 for (i = ai->nports - 1; i >= 0; --i)
2412 if (adapter->port[i])
2413 free_netdev(adapter->port[i]);
2414
2415out_free_adapter:
2416 kfree(adapter);
2417
2418out_disable_device:
2419 pci_disable_device(pdev);
2420out_release_regions:
2421 pci_release_regions(pdev);
2422 pci_set_drvdata(pdev, NULL);
2423 return err;
2424}
2425
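/*
 * PCI remove.  Tears init_one down in reverse: stop the SGE, drop the sysfs
 * group, unregister the ports, shut down offload if it was enabled, free
 * SGE resources and any dummy net devices, unmap the registers and release
 * the PCI device.
 */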
2426static void __devexit remove_one(struct pci_dev *pdev)
2427{
2428 struct net_device *dev = pci_get_drvdata(pdev);
2429
2430 if (dev) {
2431 int i;
2432 struct adapter *adapter = dev->priv;
2433
2434 t3_sge_stop(adapter);
2435 sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
2436 &cxgb3_attr_group);
2437
2438 for_each_port(adapter, i)
2439 if (test_bit(i, &adapter->registered_device_map))
2440 unregister_netdev(adapter->port[i]);
2441
2442 if (is_offload(adapter)) {
2443 cxgb3_adapter_unofld(adapter);
2444 if (test_bit(OFFLOAD_DEVMAP_BIT,
2445 &adapter->open_device_map))
2446 offload_close(&adapter->tdev);
2447 }
2448
2449 t3_free_sge_resources(adapter);
2450 cxgb_disable_msi(adapter);
2451
2452 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2453 if (adapter->dummy_netdev[i]) {
2454 free_netdev(adapter->dummy_netdev[i]);
2455 adapter->dummy_netdev[i] = NULL;
2456 }
2457
2458 for_each_port(adapter, i)
2459 if (adapter->port[i])
2460 free_netdev(adapter->port[i]);
2461
2462 iounmap(adapter->regs);
2463 kfree(adapter);
2464 pci_release_regions(pdev);
2465 pci_disable_device(pdev);
2466 pci_set_drvdata(pdev, NULL);
2467 }
2468}
2469
2470static struct pci_driver driver = {
2471 .name = DRV_NAME,
2472 .id_table = cxgb3_pci_tbl,
2473 .probe = init_one,
2474 .remove = __devexit_p(remove_one),
2475};
2476
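/*
 * Module init/exit.  Initialization sets up the offload layer and registers
 * the PCI driver; the workqueue itself is created lazily in init_one and
 * destroyed on module unload.
 */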
2477static int __init cxgb3_init_module(void)
2478{
2479 int ret;
2480
2481 cxgb3_offload_init();
2482
2483 ret = pci_register_driver(&driver);
2484 return ret;
2485}
2486
2487static void __exit cxgb3_cleanup_module(void)
2488{
2489 pci_unregister_driver(&driver);
2490 if (cxgb3_wq)
2491 destroy_workqueue(cxgb3_wq);
2492}
2493
2494module_init(cxgb3_init_module);
2495module_exit(cxgb3_cleanup_module);