1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
44#include <linux/if_vlan.h>
45#include <linux/init.h>
46#include <linux/log2.h>
47#include <linux/mdio.h>
48#include <linux/module.h>
49#include <linux/moduleparam.h>
50#include <linux/mutex.h>
51#include <linux/netdevice.h>
52#include <linux/pci.h>
53#include <linux/aer.h>
54#include <linux/rtnetlink.h>
55#include <linux/sched.h>
56#include <linux/seq_file.h>
57#include <linux/sockios.h>
58#include <linux/vmalloc.h>
59#include <linux/workqueue.h>
60#include <net/neighbour.h>
61#include <net/netevent.h>
62#include <asm/uaccess.h>
63
64#include "cxgb4.h"
65#include "t4_regs.h"
66#include "t4_msg.h"
67#include "t4fw_api.h"
68#include "l2t.h"
69
70#define DRV_VERSION "1.0.0-ko"
71#define DRV_DESC "Chelsio T4 Network Driver"
72
73/*
74 * Max interrupt hold-off timer value in us. Queues fall back to this value
75 * under extreme memory pressure so it's largish to give the system time to
76 * recover.
77 */
78#define MAX_SGE_TIMERVAL 200U
79
80enum {
81 MEMWIN0_APERTURE = 65536,
82 MEMWIN0_BASE = 0x30000,
83 MEMWIN1_APERTURE = 32768,
84 MEMWIN1_BASE = 0x28000,
85 MEMWIN2_APERTURE = 2048,
86 MEMWIN2_BASE = 0x1b800,
87};
88
89enum {
90 MAX_TXQ_ENTRIES = 16384,
91 MAX_CTRL_TXQ_ENTRIES = 1024,
92 MAX_RSPQ_ENTRIES = 16384,
93 MAX_RX_BUFFERS = 16384,
94 MIN_TXQ_ENTRIES = 32,
95 MIN_CTRL_TXQ_ENTRIES = 32,
96 MIN_RSPQ_ENTRIES = 128,
97 MIN_FL_ENTRIES = 16
98};
99
100#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
101 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
102 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
103
104#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 }
105
106static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
107 CH_DEVICE(0xa000), /* PE10K */
108 { 0, }
109};
110
111#define FW_FNAME "cxgb4/t4fw.bin"
112
113MODULE_DESCRIPTION(DRV_DESC);
114MODULE_AUTHOR("Chelsio Communications");
115MODULE_LICENSE("Dual BSD/GPL");
116MODULE_VERSION(DRV_VERSION);
117MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
118MODULE_FIRMWARE(FW_FNAME);
119
120static int dflt_msg_enable = DFLT_MSG_ENABLE;
121
122module_param(dflt_msg_enable, int, 0644);
123MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
124
125/*
126 * The driver uses the best interrupt scheme available on a platform in the
127 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
128 * of these schemes the driver may consider as follows:
129 *
130 * msi = 2: choose from among all three options
131 * msi = 1: only consider MSI and INTx interrupts
132 * msi = 0: force INTx interrupts
133 */
134static int msi = 2;
135
136module_param(msi, int, 0644);
137MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
138
139/*
140 * Queue interrupt hold-off timer values. Queues default to the first of these
141 * upon creation.
142 */
143static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
144
145module_param_array(intr_holdoff, uint, NULL, 0644);
146MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
147 "0..4 in microseconds");
148
149static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
150
151module_param_array(intr_cnt, uint, NULL, 0644);
152MODULE_PARM_DESC(intr_cnt,
153 "thresholds 1..3 for queue interrupt packet counters");
154
155static int vf_acls;
156
157#ifdef CONFIG_PCI_IOV
158module_param(vf_acls, bool, 0644);
159MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
160
161static unsigned int num_vf[4];
162
163module_param_array(num_vf, uint, NULL, 0644);
164MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
165#endif
166
167static struct dentry *cxgb4_debugfs_root;
168
169static LIST_HEAD(adapter_list);
170static DEFINE_MUTEX(uld_mutex);
171static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
172static const char *uld_str[] = { "RDMA", "iSCSI" };
173
174static void link_report(struct net_device *dev)
175{
176 if (!netif_carrier_ok(dev))
177 netdev_info(dev, "link down\n");
178 else {
179 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
180
181 const char *s = "10Mbps";
182 const struct port_info *p = netdev_priv(dev);
183
184 switch (p->link_cfg.speed) {
185 case SPEED_10000:
186 s = "10Gbps";
187 break;
188 case SPEED_1000:
189 s = "1000Mbps";
190 break;
191 case SPEED_100:
192 s = "100Mbps";
193 break;
194 }
195
196 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
197 fc[p->link_cfg.fc]);
198 }
199}
200
201void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
202{
203 struct net_device *dev = adapter->port[port_id];
204
205 /* Skip changes from disabled ports. */
206 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
207 if (link_stat)
208 netif_carrier_on(dev);
209 else
210 netif_carrier_off(dev);
211
212 link_report(dev);
213 }
214}
215
216void t4_os_portmod_changed(const struct adapter *adap, int port_id)
217{
218 static const char *mod_str[] = {
219 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
220 };
221
222 const struct net_device *dev = adap->port[port_id];
223 const struct port_info *pi = netdev_priv(dev);
224
225 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
226 netdev_info(dev, "port module unplugged\n");
227 else if (pi->mod_type < ARRAY_SIZE(mod_str))
228 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
229}
230
231/*
232 * Configure the exact and hash address filters to handle a port's multicast
233 * and secondary unicast MAC addresses.
234 */
235static int set_addr_filters(const struct net_device *dev, bool sleep)
236{
237 u64 mhash = 0;
238 u64 uhash = 0;
239 bool free = true;
240 u16 filt_idx[7];
241 const u8 *addr[7];
242 int ret, naddr = 0;
243 const struct netdev_hw_addr *ha;
244 int uc_cnt = netdev_uc_count(dev);
245 int mc_cnt = netdev_mc_count(dev);
246 const struct port_info *pi = netdev_priv(dev);
247
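	/*
	 * Addresses are pushed to the hardware in batches of up to 7 (the
	 * size of the addr[] scratch array); the first batch also frees any
	 * previously installed filters.
	 */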
248 /* first do the secondary unicast addresses */
249 netdev_for_each_uc_addr(ha, dev) {
250 addr[naddr++] = ha->addr;
251 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
252 ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
253 naddr, addr, filt_idx, &uhash, sleep);
254 if (ret < 0)
255 return ret;
256
257 free = false;
258 naddr = 0;
259 }
260 }
261
262 /* next set up the multicast addresses */
263 netdev_for_each_mc_addr(ha, dev) {
264 addr[naddr++] = ha->addr;
265 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
266 ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
267 naddr, addr, filt_idx, &mhash, sleep);
268 if (ret < 0)
269 return ret;
270
271 free = false;
272 naddr = 0;
273 }
274 }
275
276 return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0,
277 uhash | mhash, sleep);
278}
279
280/*
281 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
282 * If @mtu is -1 it is left unchanged.
283 */
284static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
285{
286 int ret;
287 struct port_info *pi = netdev_priv(dev);
288
289 ret = set_addr_filters(dev, sleep_ok);
290 if (ret == 0)
291 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
292 (dev->flags & IFF_PROMISC) ? 1 : 0,
293 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
294 sleep_ok);
295 return ret;
296}
297
298/**
299 * link_start - enable a port
300 * @dev: the port to enable
301 *
302 * Performs the MAC and PHY actions needed to enable a port.
303 */
304static int link_start(struct net_device *dev)
305{
306 int ret;
307 struct port_info *pi = netdev_priv(dev);
308
309 /*
310 * We do not set address filters and promiscuity here, the stack does
311 * that step explicitly.
312 */
313 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
314 pi->vlan_grp != NULL, true);
315 if (ret == 0) {
316 ret = t4_change_mac(pi->adapter, 0, pi->viid,
317 pi->xact_addr_filt, dev->dev_addr, true,
318 true);
319 if (ret >= 0) {
320 pi->xact_addr_filt = ret;
321 ret = 0;
322 }
323 }
324 if (ret == 0)
325 ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg);
326 if (ret == 0)
327 ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true);
328 return ret;
329}
330
331/*
332 * Response queue handler for the FW event queue.
333 */
334static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
335 const struct pkt_gl *gl)
336{
337 u8 opcode = ((const struct rss_header *)rsp)->opcode;
338
339 rsp++; /* skip RSS header */
340 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
341 const struct cpl_sge_egr_update *p = (void *)rsp;
342 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
343 struct sge_txq *txq = q->adap->sge.egr_map[qid];
344
345 txq->restarts++;
346 if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
347 struct sge_eth_txq *eq;
348
349 eq = container_of(txq, struct sge_eth_txq, q);
350 netif_tx_wake_queue(eq->txq);
351 } else {
352 struct sge_ofld_txq *oq;
353
354 oq = container_of(txq, struct sge_ofld_txq, q);
355 tasklet_schedule(&oq->qresume_tsk);
356 }
357 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
358 const struct cpl_fw6_msg *p = (void *)rsp;
359
360 if (p->type == 0)
361 t4_handle_fw_rpl(q->adap, p->data);
362 } else if (opcode == CPL_L2T_WRITE_RPL) {
363 const struct cpl_l2t_write_rpl *p = (void *)rsp;
364
365 do_l2t_write_rpl(q->adap, p);
366 } else
367 dev_err(q->adap->pdev_dev,
368 "unexpected CPL %#x on FW event queue\n", opcode);
369 return 0;
370}
371
372/**
373 * uldrx_handler - response queue handler for ULD queues
374 * @q: the response queue that received the packet
375 * @rsp: the response queue descriptor holding the offload message
376 * @gl: the gather list of packet fragments
377 *
378 * Deliver an ingress offload packet to a ULD. All processing is done by
379 * the ULD; we just maintain statistics.
380 */
381static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
382 const struct pkt_gl *gl)
383{
384 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
385
386 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
387 rxq->stats.nomem++;
388 return -1;
389 }
390 if (gl == NULL)
391 rxq->stats.imm++;
392 else if (gl == CXGB4_MSG_AN)
393 rxq->stats.an++;
394 else
395 rxq->stats.pkts++;
396 return 0;
397}
398
399static void disable_msi(struct adapter *adapter)
400{
401 if (adapter->flags & USING_MSIX) {
402 pci_disable_msix(adapter->pdev);
403 adapter->flags &= ~USING_MSIX;
404 } else if (adapter->flags & USING_MSI) {
405 pci_disable_msi(adapter->pdev);
406 adapter->flags &= ~USING_MSI;
407 }
408}
409
410/*
411 * Interrupt handler for non-data events used with MSI-X.
412 */
413static irqreturn_t t4_nondata_intr(int irq, void *cookie)
414{
415 struct adapter *adap = cookie;
416
417 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
418 if (v & PFSW) {
419 adap->swintr = 1;
420 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
421 }
422 t4_slow_intr_handler(adap);
423 return IRQ_HANDLED;
424}
425
426/*
427 * Name the MSI-X interrupts.
428 */
429static void name_msix_vecs(struct adapter *adap)
430{
431 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;
432
433 /* non-data interrupts */
434 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
435 adap->msix_info[0].desc[n] = 0;
436
437 /* FW events */
438 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
439 adap->msix_info[1].desc[n] = 0;
440
441 /* Ethernet queues */
442 for_each_port(adap, j) {
443 struct net_device *d = adap->port[j];
444 const struct port_info *pi = netdev_priv(d);
445
446 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
447 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
448 d->name, i);
449 adap->msix_info[msi_idx].desc[n] = 0;
450 }
451 }
452
453 /* offload queues */
454 for_each_ofldrxq(&adap->sge, i) {
455 snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
456 adap->name, i);
457 adap->msix_info[msi_idx++].desc[n] = 0;
458 }
459 for_each_rdmarxq(&adap->sge, i) {
460 snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
461 adap->name, i);
462 adap->msix_info[msi_idx++].desc[n] = 0;
463 }
464}
465
466static int request_msix_queue_irqs(struct adapter *adap)
467{
468 struct sge *s = &adap->sge;
469 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
470
471 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
472 adap->msix_info[1].desc, &s->fw_evtq);
473 if (err)
474 return err;
475
476 for_each_ethrxq(s, ethqidx) {
477 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
478 adap->msix_info[msi].desc,
479 &s->ethrxq[ethqidx].rspq);
480 if (err)
481 goto unwind;
482 msi++;
483 }
484 for_each_ofldrxq(s, ofldqidx) {
485 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
486 adap->msix_info[msi].desc,
487 &s->ofldrxq[ofldqidx].rspq);
488 if (err)
489 goto unwind;
490 msi++;
491 }
492 for_each_rdmarxq(s, rdmaqidx) {
493 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
494 adap->msix_info[msi].desc,
495 &s->rdmarxq[rdmaqidx].rspq);
496 if (err)
497 goto unwind;
498 msi++;
499 }
500 return 0;
501
502unwind:
503 while (--rdmaqidx >= 0)
504 free_irq(adap->msix_info[--msi].vec,
505 &s->rdmarxq[rdmaqidx].rspq);
506 while (--ofldqidx >= 0)
507 free_irq(adap->msix_info[--msi].vec,
508 &s->ofldrxq[ofldqidx].rspq);
509 while (--ethqidx >= 0)
510 free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
511 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
512 return err;
513}
514
515static void free_msix_queue_irqs(struct adapter *adap)
516{
517 int i, msi = 2;
518 struct sge *s = &adap->sge;
519
520 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
521 for_each_ethrxq(s, i)
522 free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
523 for_each_ofldrxq(s, i)
524 free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
525 for_each_rdmarxq(s, i)
526 free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
527}
528
529/**
530 * setup_rss - configure RSS
531 * @adap: the adapter
532 *
533 * Sets up RSS to distribute packets to multiple receive queues. We
534 * configure the RSS CPU lookup table to distribute to the number of HW
535 * receive queues, and the response queue lookup table to narrow that
536 * down to the response queues actually configured for each port.
537 * We always configure the RSS mapping for all ports since the mapping
538 * table has plenty of entries.
539 */
540static int setup_rss(struct adapter *adap)
541{
542 int i, j, err;
543 u16 rss[MAX_ETH_QSETS];
544
545 for_each_port(adap, i) {
546 const struct port_info *pi = adap2pinfo(adap, i);
547 const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
548
549 for (j = 0; j < pi->nqsets; j++)
550 rss[j] = q[j].rspq.abs_id;
551
552 err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size,
553 rss, pi->nqsets);
554 if (err)
555 return err;
556 }
557 return 0;
558}
559
560/*
561 * Wait until all NAPI handlers are descheduled.
562 */
563static void quiesce_rx(struct adapter *adap)
564{
565 int i;
566
567 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
568 struct sge_rspq *q = adap->sge.ingr_map[i];
569
570 if (q && q->handler)
571 napi_disable(&q->napi);
572 }
573}
574
575/*
576 * Enable NAPI scheduling and interrupt generation for all Rx queues.
577 */
578static void enable_rx(struct adapter *adap)
579{
580 int i;
581
582 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
583 struct sge_rspq *q = adap->sge.ingr_map[i];
584
585 if (!q)
586 continue;
587 if (q->handler)
588 napi_enable(&q->napi);
589 /* 0-increment GTS to start the timer and enable interrupts */
590 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
591 SEINTARM(q->intr_params) |
592 INGRESSQID(q->cntxt_id));
593 }
594}
595
596/**
597 * setup_sge_queues - configure SGE Tx/Rx/response queues
598 * @adap: the adapter
599 *
600 * Determines how many sets of SGE queues to use and initializes them.
601 * We support multiple queue sets per port if we have MSI-X, otherwise
602 * just one queue set per port.
603 */
604static int setup_sge_queues(struct adapter *adap)
605{
606 int err, msi_idx, i, j;
607 struct sge *s = &adap->sge;
608
609 bitmap_zero(s->starving_fl, MAX_EGRQ);
610 bitmap_zero(s->txq_maperr, MAX_EGRQ);
611
612 if (adap->flags & USING_MSIX)
613 msi_idx = 1; /* vector 0 is for non-queue interrupts */
614 else {
615 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
616 NULL, NULL);
617 if (err)
618 return err;
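		/*
		 * Without MSI-X all queues share one interrupt, so encode the
		 * forwarded-interrupt queue's id as a negative index; queues
		 * allocated with it forward their interrupts to intrq.
		 */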
619 msi_idx = -((int)s->intrq.abs_id + 1);
620 }
621
622 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
623 msi_idx, NULL, fwevtq_handler);
624 if (err) {
625freeout: t4_free_sge_resources(adap);
626 return err;
627 }
628
629 for_each_port(adap, i) {
630 struct net_device *dev = adap->port[i];
631 struct port_info *pi = netdev_priv(dev);
632 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
633 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
634
635 for (j = 0; j < pi->nqsets; j++, q++) {
636 if (msi_idx > 0)
637 msi_idx++;
638 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
639 msi_idx, &q->fl,
640 t4_ethrx_handler);
641 if (err)
642 goto freeout;
643 q->rspq.idx = j;
644 memset(&q->stats, 0, sizeof(q->stats));
645 }
646 for (j = 0; j < pi->nqsets; j++, t++) {
647 err = t4_sge_alloc_eth_txq(adap, t, dev,
648 netdev_get_tx_queue(dev, j),
649 s->fw_evtq.cntxt_id);
650 if (err)
651 goto freeout;
652 }
653 }
654
655 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
656 for_each_ofldrxq(s, i) {
657 struct sge_ofld_rxq *q = &s->ofldrxq[i];
658 struct net_device *dev = adap->port[i / j];
659
660 if (msi_idx > 0)
661 msi_idx++;
662 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
663 &q->fl, uldrx_handler);
664 if (err)
665 goto freeout;
666 memset(&q->stats, 0, sizeof(q->stats));
667 s->ofld_rxq[i] = q->rspq.abs_id;
668 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
669 s->fw_evtq.cntxt_id);
670 if (err)
671 goto freeout;
672 }
673
674 for_each_rdmarxq(s, i) {
675 struct sge_ofld_rxq *q = &s->rdmarxq[i];
676
677 if (msi_idx > 0)
678 msi_idx++;
679 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
680 msi_idx, &q->fl, uldrx_handler);
681 if (err)
682 goto freeout;
683 memset(&q->stats, 0, sizeof(q->stats));
684 s->rdma_rxq[i] = q->rspq.abs_id;
685 }
686
687 for_each_port(adap, i) {
688 /*
689 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
690 * have RDMA queues, and that's the right value.
691 */
692 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
693 s->fw_evtq.cntxt_id,
694 s->rdmarxq[i].rspq.cntxt_id);
695 if (err)
696 goto freeout;
697 }
698
699 t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
700 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
701 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
702 return 0;
703}
704
705/*
706 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
707 * started but failed, and a negative errno if flash load couldn't start.
708 */
709static int upgrade_fw(struct adapter *adap)
710{
711 int ret;
712 u32 vers;
713 const struct fw_hdr *hdr;
714 const struct firmware *fw;
715 struct device *dev = adap->pdev_dev;
716
717 ret = request_firmware(&fw, FW_FNAME, dev);
718 if (ret < 0) {
719 dev_err(dev, "unable to load firmware image " FW_FNAME
720 ", error %d\n", ret);
721 return ret;
722 }
723
724 hdr = (const struct fw_hdr *)fw->data;
725 vers = ntohl(hdr->fw_ver);
726 if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
727 ret = -EINVAL; /* wrong major version, won't do */
728 goto out;
729 }
730
731 /*
732 * If the flash FW is unusable or we found something newer, load it.
733 */
734 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
735 vers > adap->params.fw_vers) {
736 ret = -t4_load_fw(adap, fw->data, fw->size);
737 if (!ret)
738 dev_info(dev, "firmware upgraded to version %pI4 from "
739 FW_FNAME "\n", &hdr->fw_ver);
740 }
741out: release_firmware(fw);
742 return ret;
743}
744
745/*
746 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
747 * The allocated memory is cleared.
748 */
749void *t4_alloc_mem(size_t size)
750{
751 void *p = kmalloc(size, GFP_KERNEL);
752
753 if (!p)
754 p = vmalloc(size);
755 if (p)
756 memset(p, 0, size);
757 return p;
758}
759
760/*
761 * Free memory allocated through t4_alloc_mem().
762 */
763void t4_free_mem(void *addr)
764{
765 if (is_vmalloc_addr(addr))
766 vfree(addr);
767 else
768 kfree(addr);
769}
770
771static inline int is_offload(const struct adapter *adap)
772{
773 return adap->params.offload;
774}
775
776/*
777 * Implementation of ethtool operations.
778 */
779
780static u32 get_msglevel(struct net_device *dev)
781{
782 return netdev2adap(dev)->msg_enable;
783}
784
785static void set_msglevel(struct net_device *dev, u32 val)
786{
787 netdev2adap(dev)->msg_enable = val;
788}
789
790static char stats_strings[][ETH_GSTRING_LEN] = {
791 "TxOctetsOK ",
792 "TxFramesOK ",
793 "TxBroadcastFrames ",
794 "TxMulticastFrames ",
795 "TxUnicastFrames ",
796 "TxErrorFrames ",
797
798 "TxFrames64 ",
799 "TxFrames65To127 ",
800 "TxFrames128To255 ",
801 "TxFrames256To511 ",
802 "TxFrames512To1023 ",
803 "TxFrames1024To1518 ",
804 "TxFrames1519ToMax ",
805
806 "TxFramesDropped ",
807 "TxPauseFrames ",
808 "TxPPP0Frames ",
809 "TxPPP1Frames ",
810 "TxPPP2Frames ",
811 "TxPPP3Frames ",
812 "TxPPP4Frames ",
813 "TxPPP5Frames ",
814 "TxPPP6Frames ",
815 "TxPPP7Frames ",
816
817 "RxOctetsOK ",
818 "RxFramesOK ",
819 "RxBroadcastFrames ",
820 "RxMulticastFrames ",
821 "RxUnicastFrames ",
822
823 "RxFramesTooLong ",
824 "RxJabberErrors ",
825 "RxFCSErrors ",
826 "RxLengthErrors ",
827 "RxSymbolErrors ",
828 "RxRuntFrames ",
829
830 "RxFrames64 ",
831 "RxFrames65To127 ",
832 "RxFrames128To255 ",
833 "RxFrames256To511 ",
834 "RxFrames512To1023 ",
835 "RxFrames1024To1518 ",
836 "RxFrames1519ToMax ",
837
838 "RxPauseFrames ",
839 "RxPPP0Frames ",
840 "RxPPP1Frames ",
841 "RxPPP2Frames ",
842 "RxPPP3Frames ",
843 "RxPPP4Frames ",
844 "RxPPP5Frames ",
845 "RxPPP6Frames ",
846 "RxPPP7Frames ",
847
848 "RxBG0FramesDropped ",
849 "RxBG1FramesDropped ",
850 "RxBG2FramesDropped ",
851 "RxBG3FramesDropped ",
852 "RxBG0FramesTrunc ",
853 "RxBG1FramesTrunc ",
854 "RxBG2FramesTrunc ",
855 "RxBG3FramesTrunc ",
856
857 "TSO ",
858 "TxCsumOffload ",
859 "RxCsumGood ",
860 "VLANextractions ",
861 "VLANinsertions ",
862 "GROpackets ",
863 "GROmerged ",
864};
865
866static int get_sset_count(struct net_device *dev, int sset)
867{
868 switch (sset) {
869 case ETH_SS_STATS:
870 return ARRAY_SIZE(stats_strings);
871 default:
872 return -EOPNOTSUPP;
873 }
874}
875
876#define T4_REGMAP_SIZE (160 * 1024)
877
878static int get_regs_len(struct net_device *dev)
879{
880 return T4_REGMAP_SIZE;
881}
882
883static int get_eeprom_len(struct net_device *dev)
884{
885 return EEPROMSIZE;
886}
887
888static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
889{
890 struct adapter *adapter = netdev2adap(dev);
891
892 strcpy(info->driver, KBUILD_MODNAME);
893 strcpy(info->version, DRV_VERSION);
894 strcpy(info->bus_info, pci_name(adapter->pdev));
895
896 if (!adapter->params.fw_vers)
897 strcpy(info->fw_version, "N/A");
898 else
899 snprintf(info->fw_version, sizeof(info->fw_version),
900 "%u.%u.%u.%u, TP %u.%u.%u.%u",
901 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
902 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
903 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
904 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
905 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
906 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
907 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
908 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
909}
910
911static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
912{
913 if (stringset == ETH_SS_STATS)
914 memcpy(data, stats_strings, sizeof(stats_strings));
915}
916
917/*
918 * port stats maintained per queue of the port. They should be in the same
919 * order as in stats_strings above.
920 */
921struct queue_port_stats {
922 u64 tso;
923 u64 tx_csum;
924 u64 rx_csum;
925 u64 vlan_ex;
926 u64 vlan_ins;
927 u64 gro_pkts;
928 u64 gro_merged;
929};
930
931static void collect_sge_port_stats(const struct adapter *adap,
932 const struct port_info *p, struct queue_port_stats *s)
933{
934 int i;
935 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
936 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
937
938 memset(s, 0, sizeof(*s));
939 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
940 s->tso += tx->tso;
941 s->tx_csum += tx->tx_cso;
942 s->rx_csum += rx->stats.rx_cso;
943 s->vlan_ex += rx->stats.vlan_ex;
944 s->vlan_ins += tx->vlan_ins;
945 s->gro_pkts += rx->stats.lro_pkts;
946 s->gro_merged += rx->stats.lro_merged;
947 }
948}
949
950static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
951 u64 *data)
952{
953 struct port_info *pi = netdev_priv(dev);
954 struct adapter *adapter = pi->adapter;
955
956 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
957
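	/* the MAC stats come first; the per-queue SW stats follow them,
	 * matching the layout of stats_strings[]
	 */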
958 data += sizeof(struct port_stats) / sizeof(u64);
959 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
960}
961
962/*
963 * Return a version number to identify the type of adapter. The scheme is:
964 * - bits 0..9: chip version
965 * - bits 10..15: chip revision
966 */
967static inline unsigned int mk_adap_vers(const struct adapter *ap)
968{
969 return 4 | (ap->params.rev << 10);
970}
971
972static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
973 unsigned int end)
974{
975 u32 *p = buf + start;
976
977 for ( ; start <= end; start += sizeof(u32))
978 *p++ = t4_read_reg(ap, start);
979}
980
981static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
982 void *buf)
983{
984 static const unsigned int reg_ranges[] = {
985 0x1008, 0x1108,
986 0x1180, 0x11b4,
987 0x11fc, 0x123c,
988 0x1300, 0x173c,
989 0x1800, 0x18fc,
990 0x3000, 0x30d8,
991 0x30e0, 0x5924,
992 0x5960, 0x59d4,
993 0x5a00, 0x5af8,
994 0x6000, 0x6098,
995 0x6100, 0x6150,
996 0x6200, 0x6208,
997 0x6240, 0x6248,
998 0x6280, 0x6338,
999 0x6370, 0x638c,
1000 0x6400, 0x643c,
1001 0x6500, 0x6524,
1002 0x6a00, 0x6a38,
1003 0x6a60, 0x6a78,
1004 0x6b00, 0x6b84,
1005 0x6bf0, 0x6c84,
1006 0x6cf0, 0x6d84,
1007 0x6df0, 0x6e84,
1008 0x6ef0, 0x6f84,
1009 0x6ff0, 0x7084,
1010 0x70f0, 0x7184,
1011 0x71f0, 0x7284,
1012 0x72f0, 0x7384,
1013 0x73f0, 0x7450,
1014 0x7500, 0x7530,
1015 0x7600, 0x761c,
1016 0x7680, 0x76cc,
1017 0x7700, 0x7798,
1018 0x77c0, 0x77fc,
1019 0x7900, 0x79fc,
1020 0x7b00, 0x7c38,
1021 0x7d00, 0x7efc,
1022 0x8dc0, 0x8e1c,
1023 0x8e30, 0x8e78,
1024 0x8ea0, 0x8f6c,
1025 0x8fc0, 0x9074,
1026 0x90fc, 0x90fc,
1027 0x9400, 0x9458,
1028 0x9600, 0x96bc,
1029 0x9800, 0x9808,
1030 0x9820, 0x983c,
1031 0x9850, 0x9864,
1032 0x9c00, 0x9c6c,
1033 0x9c80, 0x9cec,
1034 0x9d00, 0x9d6c,
1035 0x9d80, 0x9dec,
1036 0x9e00, 0x9e6c,
1037 0x9e80, 0x9eec,
1038 0x9f00, 0x9f6c,
1039 0x9f80, 0x9fec,
1040 0xd004, 0xd03c,
1041 0xdfc0, 0xdfe0,
1042 0xe000, 0xea7c,
1043 0xf000, 0x11190,
1044 0x19040, 0x19124,
1045 0x19150, 0x191b0,
1046 0x191d0, 0x191e8,
1047 0x19238, 0x1924c,
1048 0x193f8, 0x19474,
1049 0x19490, 0x194f8,
1050 0x19800, 0x19f30,
1051 0x1a000, 0x1a06c,
1052 0x1a0b0, 0x1a120,
1053 0x1a128, 0x1a138,
1054 0x1a190, 0x1a1c4,
1055 0x1a1fc, 0x1a1fc,
1056 0x1e040, 0x1e04c,
1057 0x1e240, 0x1e28c,
1058 0x1e2c0, 0x1e2c0,
1059 0x1e2e0, 0x1e2e0,
1060 0x1e300, 0x1e384,
1061 0x1e3c0, 0x1e3c8,
1062 0x1e440, 0x1e44c,
1063 0x1e640, 0x1e68c,
1064 0x1e6c0, 0x1e6c0,
1065 0x1e6e0, 0x1e6e0,
1066 0x1e700, 0x1e784,
1067 0x1e7c0, 0x1e7c8,
1068 0x1e840, 0x1e84c,
1069 0x1ea40, 0x1ea8c,
1070 0x1eac0, 0x1eac0,
1071 0x1eae0, 0x1eae0,
1072 0x1eb00, 0x1eb84,
1073 0x1ebc0, 0x1ebc8,
1074 0x1ec40, 0x1ec4c,
1075 0x1ee40, 0x1ee8c,
1076 0x1eec0, 0x1eec0,
1077 0x1eee0, 0x1eee0,
1078 0x1ef00, 0x1ef84,
1079 0x1efc0, 0x1efc8,
1080 0x1f040, 0x1f04c,
1081 0x1f240, 0x1f28c,
1082 0x1f2c0, 0x1f2c0,
1083 0x1f2e0, 0x1f2e0,
1084 0x1f300, 0x1f384,
1085 0x1f3c0, 0x1f3c8,
1086 0x1f440, 0x1f44c,
1087 0x1f640, 0x1f68c,
1088 0x1f6c0, 0x1f6c0,
1089 0x1f6e0, 0x1f6e0,
1090 0x1f700, 0x1f784,
1091 0x1f7c0, 0x1f7c8,
1092 0x1f840, 0x1f84c,
1093 0x1fa40, 0x1fa8c,
1094 0x1fac0, 0x1fac0,
1095 0x1fae0, 0x1fae0,
1096 0x1fb00, 0x1fb84,
1097 0x1fbc0, 0x1fbc8,
1098 0x1fc40, 0x1fc4c,
1099 0x1fe40, 0x1fe8c,
1100 0x1fec0, 0x1fec0,
1101 0x1fee0, 0x1fee0,
1102 0x1ff00, 0x1ff84,
1103 0x1ffc0, 0x1ffc8,
1104 0x20000, 0x2002c,
1105 0x20100, 0x2013c,
1106 0x20190, 0x201c8,
1107 0x20200, 0x20318,
1108 0x20400, 0x20528,
1109 0x20540, 0x20614,
1110 0x21000, 0x21040,
1111 0x2104c, 0x21060,
1112 0x210c0, 0x210ec,
1113 0x21200, 0x21268,
1114 0x21270, 0x21284,
1115 0x212fc, 0x21388,
1116 0x21400, 0x21404,
1117 0x21500, 0x21518,
1118 0x2152c, 0x2153c,
1119 0x21550, 0x21554,
1120 0x21600, 0x21600,
1121 0x21608, 0x21628,
1122 0x21630, 0x2163c,
1123 0x21700, 0x2171c,
1124 0x21780, 0x2178c,
1125 0x21800, 0x21c38,
1126 0x21c80, 0x21d7c,
1127 0x21e00, 0x21e04,
1128 0x22000, 0x2202c,
1129 0x22100, 0x2213c,
1130 0x22190, 0x221c8,
1131 0x22200, 0x22318,
1132 0x22400, 0x22528,
1133 0x22540, 0x22614,
1134 0x23000, 0x23040,
1135 0x2304c, 0x23060,
1136 0x230c0, 0x230ec,
1137 0x23200, 0x23268,
1138 0x23270, 0x23284,
1139 0x232fc, 0x23388,
1140 0x23400, 0x23404,
1141 0x23500, 0x23518,
1142 0x2352c, 0x2353c,
1143 0x23550, 0x23554,
1144 0x23600, 0x23600,
1145 0x23608, 0x23628,
1146 0x23630, 0x2363c,
1147 0x23700, 0x2371c,
1148 0x23780, 0x2378c,
1149 0x23800, 0x23c38,
1150 0x23c80, 0x23d7c,
1151 0x23e00, 0x23e04,
1152 0x24000, 0x2402c,
1153 0x24100, 0x2413c,
1154 0x24190, 0x241c8,
1155 0x24200, 0x24318,
1156 0x24400, 0x24528,
1157 0x24540, 0x24614,
1158 0x25000, 0x25040,
1159 0x2504c, 0x25060,
1160 0x250c0, 0x250ec,
1161 0x25200, 0x25268,
1162 0x25270, 0x25284,
1163 0x252fc, 0x25388,
1164 0x25400, 0x25404,
1165 0x25500, 0x25518,
1166 0x2552c, 0x2553c,
1167 0x25550, 0x25554,
1168 0x25600, 0x25600,
1169 0x25608, 0x25628,
1170 0x25630, 0x2563c,
1171 0x25700, 0x2571c,
1172 0x25780, 0x2578c,
1173 0x25800, 0x25c38,
1174 0x25c80, 0x25d7c,
1175 0x25e00, 0x25e04,
1176 0x26000, 0x2602c,
1177 0x26100, 0x2613c,
1178 0x26190, 0x261c8,
1179 0x26200, 0x26318,
1180 0x26400, 0x26528,
1181 0x26540, 0x26614,
1182 0x27000, 0x27040,
1183 0x2704c, 0x27060,
1184 0x270c0, 0x270ec,
1185 0x27200, 0x27268,
1186 0x27270, 0x27284,
1187 0x272fc, 0x27388,
1188 0x27400, 0x27404,
1189 0x27500, 0x27518,
1190 0x2752c, 0x2753c,
1191 0x27550, 0x27554,
1192 0x27600, 0x27600,
1193 0x27608, 0x27628,
1194 0x27630, 0x2763c,
1195 0x27700, 0x2771c,
1196 0x27780, 0x2778c,
1197 0x27800, 0x27c38,
1198 0x27c80, 0x27d7c,
1199 0x27e00, 0x27e04
1200 };
1201
1202 int i;
1203 struct adapter *ap = netdev2adap(dev);
1204
1205 regs->version = mk_adap_vers(ap);
1206
1207 memset(buf, 0, T4_REGMAP_SIZE);
1208 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
1209 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1210}
1211
1212static int restart_autoneg(struct net_device *dev)
1213{
1214 struct port_info *p = netdev_priv(dev);
1215
1216 if (!netif_running(dev))
1217 return -EAGAIN;
1218 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1219 return -EINVAL;
1220 t4_restart_aneg(p->adapter, 0, p->tx_chan);
1221 return 0;
1222}
1223
1224static int identify_port(struct net_device *dev, u32 data)
1225{
1226 if (data == 0)
1227 data = 2; /* default to 2 seconds */
1228
1229 return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid,
1230 data * 5);
1231}
1232
1233static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1234{
1235 unsigned int v = 0;
1236
1237 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1238 type == FW_PORT_TYPE_BT_XAUI) {
1239 v |= SUPPORTED_TP;
1240 if (caps & FW_PORT_CAP_SPEED_100M)
1241 v |= SUPPORTED_100baseT_Full;
1242 if (caps & FW_PORT_CAP_SPEED_1G)
1243 v |= SUPPORTED_1000baseT_Full;
1244 if (caps & FW_PORT_CAP_SPEED_10G)
1245 v |= SUPPORTED_10000baseT_Full;
1246 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1247 v |= SUPPORTED_Backplane;
1248 if (caps & FW_PORT_CAP_SPEED_1G)
1249 v |= SUPPORTED_1000baseKX_Full;
1250 if (caps & FW_PORT_CAP_SPEED_10G)
1251 v |= SUPPORTED_10000baseKX4_Full;
1252 } else if (type == FW_PORT_TYPE_KR)
1253 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1254 else if (type == FW_PORT_TYPE_BP_AP)
1255 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC;
1256 else if (type == FW_PORT_TYPE_FIBER_XFI ||
1257 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
1258 v |= SUPPORTED_FIBRE;
1259
1260 if (caps & FW_PORT_CAP_ANEG)
1261 v |= SUPPORTED_Autoneg;
1262 return v;
1263}
1264
1265static unsigned int to_fw_linkcaps(unsigned int caps)
1266{
1267 unsigned int v = 0;
1268
1269 if (caps & ADVERTISED_100baseT_Full)
1270 v |= FW_PORT_CAP_SPEED_100M;
1271 if (caps & ADVERTISED_1000baseT_Full)
1272 v |= FW_PORT_CAP_SPEED_1G;
1273 if (caps & ADVERTISED_10000baseT_Full)
1274 v |= FW_PORT_CAP_SPEED_10G;
1275 return v;
1276}
1277
1278static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1279{
1280 const struct port_info *p = netdev_priv(dev);
1281
1282 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
1283 p->port_type == FW_PORT_TYPE_BT_XFI ||
1284 p->port_type == FW_PORT_TYPE_BT_XAUI)
1285 cmd->port = PORT_TP;
1286 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1287 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
1288 cmd->port = PORT_FIBRE;
1289 else if (p->port_type == FW_PORT_TYPE_SFP) {
1290 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1291 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1292 cmd->port = PORT_DA;
1293 else
1294 cmd->port = PORT_FIBRE;
1295 } else
1296 cmd->port = PORT_OTHER;
1297
1298 if (p->mdio_addr >= 0) {
1299 cmd->phy_address = p->mdio_addr;
1300 cmd->transceiver = XCVR_EXTERNAL;
1301 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1302 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1303 } else {
1304 cmd->phy_address = 0; /* not really, but no better option */
1305 cmd->transceiver = XCVR_INTERNAL;
1306 cmd->mdio_support = 0;
1307 }
1308
1309 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1310 cmd->advertising = from_fw_linkcaps(p->port_type,
1311 p->link_cfg.advertising);
1312 cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
1313 cmd->duplex = DUPLEX_FULL;
1314 cmd->autoneg = p->link_cfg.autoneg;
1315 cmd->maxtxpkt = 0;
1316 cmd->maxrxpkt = 0;
1317 return 0;
1318}
1319
1320static unsigned int speed_to_caps(int speed)
1321{
1322 if (speed == SPEED_100)
1323 return FW_PORT_CAP_SPEED_100M;
1324 if (speed == SPEED_1000)
1325 return FW_PORT_CAP_SPEED_1G;
1326 if (speed == SPEED_10000)
1327 return FW_PORT_CAP_SPEED_10G;
1328 return 0;
1329}
1330
1331static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1332{
1333 unsigned int cap;
1334 struct port_info *p = netdev_priv(dev);
1335 struct link_config *lc = &p->link_cfg;
1336
1337 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
1338 return -EINVAL;
1339
1340 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1341 /*
1342 * PHY offers a single speed. See if that's what's
1343 * being requested.
1344 */
1345 if (cmd->autoneg == AUTONEG_DISABLE &&
1346 (lc->supported & speed_to_caps(cmd->speed)))
1347 return 0;
1348 return -EINVAL;
1349 }
1350
1351 if (cmd->autoneg == AUTONEG_DISABLE) {
1352 cap = speed_to_caps(cmd->speed);
1353
1354 if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
1355 cmd->speed == SPEED_10000)
1356 return -EINVAL;
1357 lc->requested_speed = cap;
1358 lc->advertising = 0;
1359 } else {
1360 cap = to_fw_linkcaps(cmd->advertising);
1361 if (!(lc->supported & cap))
1362 return -EINVAL;
1363 lc->requested_speed = 0;
1364 lc->advertising = cap | FW_PORT_CAP_ANEG;
1365 }
1366 lc->autoneg = cmd->autoneg;
1367
1368 if (netif_running(dev))
1369 return t4_link_start(p->adapter, 0, p->tx_chan, lc);
1370 return 0;
1371}
1372
1373static void get_pauseparam(struct net_device *dev,
1374 struct ethtool_pauseparam *epause)
1375{
1376 struct port_info *p = netdev_priv(dev);
1377
1378 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1379 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1380 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1381}
1382
1383static int set_pauseparam(struct net_device *dev,
1384 struct ethtool_pauseparam *epause)
1385{
1386 struct port_info *p = netdev_priv(dev);
1387 struct link_config *lc = &p->link_cfg;
1388
1389 if (epause->autoneg == AUTONEG_DISABLE)
1390 lc->requested_fc = 0;
1391 else if (lc->supported & FW_PORT_CAP_ANEG)
1392 lc->requested_fc = PAUSE_AUTONEG;
1393 else
1394 return -EINVAL;
1395
1396 if (epause->rx_pause)
1397 lc->requested_fc |= PAUSE_RX;
1398 if (epause->tx_pause)
1399 lc->requested_fc |= PAUSE_TX;
1400 if (netif_running(dev))
1401 return t4_link_start(p->adapter, 0, p->tx_chan, lc);
1402 return 0;
1403}
1404
1405static u32 get_rx_csum(struct net_device *dev)
1406{
1407 struct port_info *p = netdev_priv(dev);
1408
1409 return p->rx_offload & RX_CSO;
1410}
1411
1412static int set_rx_csum(struct net_device *dev, u32 data)
1413{
1414 struct port_info *p = netdev_priv(dev);
1415
1416 if (data)
1417 p->rx_offload |= RX_CSO;
1418 else
1419 p->rx_offload &= ~RX_CSO;
1420 return 0;
1421}
1422
1423static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1424{
1425 const struct port_info *pi = netdev_priv(dev);
1426 const struct sge *s = &pi->adapter->sge;
1427
1428 e->rx_max_pending = MAX_RX_BUFFERS;
1429 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1430 e->rx_jumbo_max_pending = 0;
1431 e->tx_max_pending = MAX_TXQ_ENTRIES;
1432
1433 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1434 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1435 e->rx_jumbo_pending = 0;
1436 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1437}
1438
1439static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1440{
1441 int i;
1442 const struct port_info *pi = netdev_priv(dev);
1443 struct adapter *adapter = pi->adapter;
1444 struct sge *s = &adapter->sge;
1445
1446 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1447 e->tx_pending > MAX_TXQ_ENTRIES ||
1448 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1449 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1450 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1451 return -EINVAL;
1452
1453 if (adapter->flags & FULL_INIT_DONE)
1454 return -EBUSY;
1455
1456 for (i = 0; i < pi->nqsets; ++i) {
1457 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1458 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1459 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1460 }
1461 return 0;
1462}
1463
1464static int closest_timer(const struct sge *s, int time)
1465{
1466 int i, delta, match = 0, min_delta = INT_MAX;
1467
1468 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1469 delta = time - s->timer_val[i];
1470 if (delta < 0)
1471 delta = -delta;
1472 if (delta < min_delta) {
1473 min_delta = delta;
1474 match = i;
1475 }
1476 }
1477 return match;
1478}
1479
1480static int closest_thres(const struct sge *s, int thres)
1481{
1482 int i, delta, match = 0, min_delta = INT_MAX;
1483
1484 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1485 delta = thres - s->counter_val[i];
1486 if (delta < 0)
1487 delta = -delta;
1488 if (delta < min_delta) {
1489 min_delta = delta;
1490 match = i;
1491 }
1492 }
1493 return match;
1494}
1495
1496/*
1497 * Return a queue's interrupt hold-off time in us. 0 means no timer.
1498 */
1499static unsigned int qtimer_val(const struct adapter *adap,
1500 const struct sge_rspq *q)
1501{
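	/* the timer index sits above bit 0, which is the counter-enable flag
	 * (see set_rxq_intr_params() below)
	 */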
1502 unsigned int idx = q->intr_params >> 1;
1503
1504 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1505}
1506
1507/**
1508 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1509 * @adap: the adapter
1510 * @q: the Rx queue
1511 * @us: the hold-off time in us, or 0 to disable timer
1512 * @cnt: the hold-off packet count, or 0 to disable counter
1513 *
1514 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1515 * one of the two needs to be enabled for the queue to generate interrupts.
1516 */
1517static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1518 unsigned int us, unsigned int cnt)
1519{
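	/* if both the timer and the counter are disabled, fall back to a
	 * packet count of 1 so the queue still generates interrupts
	 */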
1520 if ((us | cnt) == 0)
1521 cnt = 1;
1522
1523 if (cnt) {
1524 int err;
1525 u32 v, new_idx;
1526
1527 new_idx = closest_thres(&adap->sge, cnt);
1528 if (q->desc && q->pktcnt_idx != new_idx) {
1529 /* the queue has already been created, update it */
1530 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1531 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1532 FW_PARAMS_PARAM_YZ(q->cntxt_id);
1533 err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx);
1534 if (err)
1535 return err;
1536 }
1537 q->pktcnt_idx = new_idx;
1538 }
1539
1540 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1541 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1542 return 0;
1543}
1544
1545static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1546{
1547 const struct port_info *pi = netdev_priv(dev);
1548 struct adapter *adap = pi->adapter;
1549
1550 return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1551 c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
1552}
1553
1554static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1555{
1556 const struct port_info *pi = netdev_priv(dev);
1557 const struct adapter *adap = pi->adapter;
1558 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1559
1560 c->rx_coalesce_usecs = qtimer_val(adap, rq);
1561 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
1562 adap->sge.counter_val[rq->pktcnt_idx] : 0;
1563 return 0;
1564}
1565
1566/*
1567 * Translate a physical EEPROM address to virtual. The first 1K is accessed
1568 * through virtual addresses starting at 31K, the rest is accessed through
1569 * virtual addresses starting at 0. This mapping is correct only for PF0.
1570 */
1571static int eeprom_ptov(unsigned int phys_addr)
1572{
1573 if (phys_addr < 1024)
1574 return phys_addr + (31 << 10);
1575 if (phys_addr < EEPROMSIZE)
1576 return phys_addr - 1024;
1577 return -EINVAL;
1578}
1579
1580/*
1581 * The next two routines implement eeprom read/write from physical addresses.
1582 * The physical->virtual translation is correct only for PF0.
1583 */
1584static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1585{
1586 int vaddr = eeprom_ptov(phys_addr);
1587
1588 if (vaddr >= 0)
1589 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1590 return vaddr < 0 ? vaddr : 0;
1591}
1592
1593static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1594{
1595 int vaddr = eeprom_ptov(phys_addr);
1596
1597 if (vaddr >= 0)
1598 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1599 return vaddr < 0 ? vaddr : 0;
1600}
1601
1602#define EEPROM_MAGIC 0x38E2F10C
1603
1604static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1605 u8 *data)
1606{
1607 int i, err = 0;
1608 struct adapter *adapter = netdev2adap(dev);
1609
1610 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1611 if (!buf)
1612 return -ENOMEM;
1613
1614 e->magic = EEPROM_MAGIC;
1615 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1616 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1617
1618 if (!err)
1619 memcpy(data, buf + e->offset, e->len);
1620 kfree(buf);
1621 return err;
1622}
1623
1624static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1625 u8 *data)
1626{
1627 u8 *buf;
1628 int err = 0;
1629 u32 aligned_offset, aligned_len, *p;
1630 struct adapter *adapter = netdev2adap(dev);
1631
1632 if (eeprom->magic != EEPROM_MAGIC)
1633 return -EINVAL;
1634
1635 aligned_offset = eeprom->offset & ~3;
1636 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1637
1638 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1639 /*
1640 * RMW possibly needed for first or last words.
1641 */
1642 buf = kmalloc(aligned_len, GFP_KERNEL);
1643 if (!buf)
1644 return -ENOMEM;
1645 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1646 if (!err && aligned_len > 4)
1647 err = eeprom_rd_phys(adapter,
1648 aligned_offset + aligned_len - 4,
1649 (u32 *)&buf[aligned_len - 4]);
1650 if (err)
1651 goto out;
1652 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1653 } else
1654 buf = data;
1655
1656 err = t4_seeprom_wp(adapter, false);
1657 if (err)
1658 goto out;
1659
1660 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1661 err = eeprom_wr_phys(adapter, aligned_offset, *p);
1662 aligned_offset += 4;
1663 }
1664
1665 if (!err)
1666 err = t4_seeprom_wp(adapter, true);
1667out:
1668 if (buf != data)
1669 kfree(buf);
1670 return err;
1671}
1672
1673static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1674{
1675 int ret;
1676 const struct firmware *fw;
1677 struct adapter *adap = netdev2adap(netdev);
1678
1679 ef->data[sizeof(ef->data) - 1] = '\0';
1680 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1681 if (ret < 0)
1682 return ret;
1683
1684 ret = t4_load_fw(adap, fw->data, fw->size);
1685 release_firmware(fw);
1686 if (!ret)
1687 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
1688 return ret;
1689}
1690
1691#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1692#define BCAST_CRC 0xa0ccc1a6
1693
1694static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1695{
1696 wol->supported = WAKE_BCAST | WAKE_MAGIC;
1697 wol->wolopts = netdev2adap(dev)->wol;
1698 memset(&wol->sopass, 0, sizeof(wol->sopass));
1699}
1700
1701static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1702{
1703 int err = 0;
1704 struct port_info *pi = netdev_priv(dev);
1705
1706 if (wol->wolopts & ~WOL_SUPPORTED)
1707 return -EINVAL;
1708 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
1709 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
1710 if (wol->wolopts & WAKE_BCAST) {
1711 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
1712 ~0ULL, 0, false);
1713 if (!err)
1714 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
1715 ~6ULL, ~0ULL, BCAST_CRC, true);
1716 } else
1717 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
1718 return err;
1719}
1720
1721static int set_tso(struct net_device *dev, u32 value)
1722{
1723 if (value)
1724 dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
1725 else
1726 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
1727 return 0;
1728}
1729
1730static int set_flags(struct net_device *dev, u32 flags)
1731{
1732 if (flags & ~ETH_FLAG_RXHASH)
1733 return -EOPNOTSUPP;
1734
1735 if (flags & ETH_FLAG_RXHASH)
1736 dev->features |= NETIF_F_RXHASH;
1737 else
1738 dev->features &= ~NETIF_F_RXHASH;
1739 return 0;
1740}
1741
1742static struct ethtool_ops cxgb_ethtool_ops = {
1743 .get_settings = get_settings,
1744 .set_settings = set_settings,
1745 .get_drvinfo = get_drvinfo,
1746 .get_msglevel = get_msglevel,
1747 .set_msglevel = set_msglevel,
1748 .get_ringparam = get_sge_param,
1749 .set_ringparam = set_sge_param,
1750 .get_coalesce = get_coalesce,
1751 .set_coalesce = set_coalesce,
1752 .get_eeprom_len = get_eeprom_len,
1753 .get_eeprom = get_eeprom,
1754 .set_eeprom = set_eeprom,
1755 .get_pauseparam = get_pauseparam,
1756 .set_pauseparam = set_pauseparam,
1757 .get_rx_csum = get_rx_csum,
1758 .set_rx_csum = set_rx_csum,
1759 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
1760 .set_sg = ethtool_op_set_sg,
1761 .get_link = ethtool_op_get_link,
1762 .get_strings = get_strings,
1763 .phys_id = identify_port,
1764 .nway_reset = restart_autoneg,
1765 .get_sset_count = get_sset_count,
1766 .get_ethtool_stats = get_stats,
1767 .get_regs_len = get_regs_len,
1768 .get_regs = get_regs,
1769 .get_wol = get_wol,
1770 .set_wol = set_wol,
1771 .set_tso = set_tso,
1772 .set_flags = set_flags,
1773 .flash_device = set_flash,
1774};
1775
1776/*
1777 * debugfs support
1778 */
1779
1780static int mem_open(struct inode *inode, struct file *file)
1781{
1782 file->private_data = inode->i_private;
1783 return 0;
1784}
1785
1786static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
1787 loff_t *ppos)
1788{
1789 loff_t pos = *ppos;
1790 loff_t avail = file->f_path.dentry->d_inode->i_size;
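	/* the low 2 bits of the private data select the memory (EDC0, EDC1,
	 * or MC); see add_debugfs_mem() below
	 */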
1791 unsigned int mem = (uintptr_t)file->private_data & 3;
1792 struct adapter *adap = file->private_data - mem;
1793
1794 if (pos < 0)
1795 return -EINVAL;
1796 if (pos >= avail)
1797 return 0;
1798 if (count > avail - pos)
1799 count = avail - pos;
1800
1801 while (count) {
1802 size_t len;
1803 int ret, ofst;
1804 __be32 data[16];
1805
1806 if (mem == MEM_MC)
1807 ret = t4_mc_read(adap, pos, data, NULL);
1808 else
1809 ret = t4_edc_read(adap, mem, pos, data, NULL);
1810 if (ret)
1811 return ret;
1812
1813 ofst = pos % sizeof(data);
1814 len = min(count, sizeof(data) - ofst);
1815 if (copy_to_user(buf, (u8 *)data + ofst, len))
1816 return -EFAULT;
1817
1818 buf += len;
1819 pos += len;
1820 count -= len;
1821 }
1822 count = pos - *ppos;
1823 *ppos = pos;
1824 return count;
1825}
1826
1827static const struct file_operations mem_debugfs_fops = {
1828 .owner = THIS_MODULE,
1829 .open = mem_open,
1830 .read = mem_read,
1831};
1832
1833static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
1834 unsigned int idx, unsigned int size_mb)
1835{
1836 struct dentry *de;
1837
1838 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
1839 (void *)adap + idx, &mem_debugfs_fops);
1840 if (de && de->d_inode)
1841 de->d_inode->i_size = size_mb << 20;
1842}
1843
1844static int __devinit setup_debugfs(struct adapter *adap)
1845{
1846 int i;
1847
1848 if (IS_ERR_OR_NULL(adap->debugfs_root))
1849 return -1;
1850
1851 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
1852 if (i & EDRAM0_ENABLE)
1853 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
1854 if (i & EDRAM1_ENABLE)
1855 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
1856 if (i & EXT_MEM_ENABLE)
1857 add_debugfs_mem(adap, "mc", MEM_MC,
1858 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
1859 if (adap->l2t)
1860 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
1861 &t4_l2t_fops);
1862 return 0;
1863}
1864
1865/*
1866 * upper-layer driver support
1867 */
1868
1869/*
1870 * Allocate an active-open TID and set it to the supplied value.
1871 */
1872int cxgb4_alloc_atid(struct tid_info *t, void *data)
1873{
1874 int atid = -1;
1875
1876 spin_lock_bh(&t->atid_lock);
1877 if (t->afree) {
1878 union aopen_entry *p = t->afree;
1879
1880 atid = p - t->atid_tab;
1881 t->afree = p->next;
1882 p->data = data;
1883 t->atids_in_use++;
1884 }
1885 spin_unlock_bh(&t->atid_lock);
1886 return atid;
1887}
1888EXPORT_SYMBOL(cxgb4_alloc_atid);
1889
1890/*
1891 * Release an active-open TID.
1892 */
1893void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
1894{
1895 union aopen_entry *p = &t->atid_tab[atid];
1896
1897 spin_lock_bh(&t->atid_lock);
1898 p->next = t->afree;
1899 t->afree = p;
1900 t->atids_in_use--;
1901 spin_unlock_bh(&t->atid_lock);
1902}
1903EXPORT_SYMBOL(cxgb4_free_atid);
1904
1905/*
1906 * Allocate a server TID and set it to the supplied value.
1907 */
1908int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
1909{
1910 int stid;
1911
1912 spin_lock_bh(&t->stid_lock);
1913 if (family == PF_INET) {
1914 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1915 if (stid < t->nstids)
1916 __set_bit(stid, t->stid_bmap);
1917 else
1918 stid = -1;
1919 } else {
1920 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
1921 if (stid < 0)
1922 stid = -1;
1923 }
1924 if (stid >= 0) {
1925 t->stid_tab[stid].data = data;
1926 stid += t->stid_base;
1927 t->stids_in_use++;
1928 }
1929 spin_unlock_bh(&t->stid_lock);
1930 return stid;
1931}
1932EXPORT_SYMBOL(cxgb4_alloc_stid);
1933
1934/*
1935 * Release a server TID.
1936 */
1937void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1938{
1939 stid -= t->stid_base;
1940 spin_lock_bh(&t->stid_lock);
1941 if (family == PF_INET)
1942 __clear_bit(stid, t->stid_bmap);
1943 else
1944 bitmap_release_region(t->stid_bmap, stid, 2);
1945 t->stid_tab[stid].data = NULL;
1946 t->stids_in_use--;
1947 spin_unlock_bh(&t->stid_lock);
1948}
1949EXPORT_SYMBOL(cxgb4_free_stid);
1950
1951/*
1952 * Populate a TID_RELEASE WR. Caller must properly size the skb.
1953 */
1954static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1955 unsigned int tid)
1956{
1957 struct cpl_tid_release *req;
1958
1959 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1960 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
1961 INIT_TP_WR(req, tid);
1962 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1963}
1964
1965/*
1966 * Queue a TID release request and if necessary schedule a work queue to
1967 * process it.
1968 */
1969void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1970 unsigned int tid)
1971{
1972 void **p = &t->tid_tab[tid];
1973 struct adapter *adap = container_of(t, struct adapter, tids);
1974
1975 spin_lock_bh(&adap->tid_release_lock);
1976 *p = adap->tid_release_head;
1977 /* Low 2 bits encode the Tx channel number */
1978 adap->tid_release_head = (void **)((uintptr_t)p | chan);
1979 if (!adap->tid_release_task_busy) {
1980 adap->tid_release_task_busy = true;
1981 schedule_work(&adap->tid_release_task);
1982 }
1983 spin_unlock_bh(&adap->tid_release_lock);
1984}
1985EXPORT_SYMBOL(cxgb4_queue_tid_release);
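/*
 * The deferral above relies on a small pointer-tagging trick: entries of
 * tid_tab[] are pointer-sized and pointer-aligned, so the two low bits of
 * &tid_tab[tid] are always zero and can carry the Tx channel number.
 * process_tid_release_list() below recovers the channel with
 * "(uintptr_t)p & 3" and the original slot address by subtracting it back
 * out.
 */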
1986
1987/*
1988 * Process the list of pending TID release requests.
1989 */
1990static void process_tid_release_list(struct work_struct *work)
1991{
1992 struct sk_buff *skb;
1993 struct adapter *adap;
1994
1995 adap = container_of(work, struct adapter, tid_release_task);
1996
1997 spin_lock_bh(&adap->tid_release_lock);
1998 while (adap->tid_release_head) {
1999 void **p = adap->tid_release_head;
2000 unsigned int chan = (uintptr_t)p & 3;
2001 p = (void *)p - chan;
2002
2003 adap->tid_release_head = *p;
2004 *p = NULL;
2005 spin_unlock_bh(&adap->tid_release_lock);
2006
2007 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2008 GFP_KERNEL)))
2009 schedule_timeout_uninterruptible(1);
2010
2011 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2012 t4_ofld_send(adap, skb);
2013 spin_lock_bh(&adap->tid_release_lock);
2014 }
2015 adap->tid_release_task_busy = false;
2016 spin_unlock_bh(&adap->tid_release_lock);
2017}
2018
2019/*
2020 * Release a TID and inform HW. If we are unable to allocate the release
2021 * message, we defer it to a work queue.
2022 */
2023void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2024{
2025 void *old;
2026 struct sk_buff *skb;
2027 struct adapter *adap = container_of(t, struct adapter, tids);
2028
2029 old = t->tid_tab[tid];
2030 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2031 if (likely(skb)) {
2032 t->tid_tab[tid] = NULL;
2033 mk_tid_release(skb, chan, tid);
2034 t4_ofld_send(adap, skb);
2035 } else
2036 cxgb4_queue_tid_release(t, chan, tid);
2037 if (old)
2038 atomic_dec(&t->tids_in_use);
2039}
2040EXPORT_SYMBOL(cxgb4_remove_tid);
2041
2042/*
2043 * Allocate and initialize the TID tables. Returns 0 on success.
2044 */
2045static int tid_init(struct tid_info *t)
2046{
2047 size_t size;
2048 unsigned int natids = t->natids;
2049
2050 size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
2051 t->nstids * sizeof(*t->stid_tab) +
2052 BITS_TO_LONGS(t->nstids) * sizeof(long);
2053 t->tid_tab = t4_alloc_mem(size);
2054 if (!t->tid_tab)
2055 return -ENOMEM;
2056
2057 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2058 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2059 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
2060 spin_lock_init(&t->stid_lock);
2061 spin_lock_init(&t->atid_lock);
2062
2063 t->stids_in_use = 0;
2064 t->afree = NULL;
2065 t->atids_in_use = 0;
2066 atomic_set(&t->tids_in_use, 0);
2067
2068 /* Set up the free list for atid_tab and clear the stid bitmap. */
2069 if (natids) {
2070 while (--natids)
2071 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2072 t->afree = t->atid_tab;
2073 }
2074 bitmap_zero(t->stid_bmap, t->nstids);
2075 return 0;
2076}
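/*
 * Rough picture of the single t4_alloc_mem() block carved up by tid_init()
 * (actual sizes come from the FW-provided ntids/natids/nstids):
 *
 *	| tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids] | stid_bmap |
 *
 * Keeping all four tables in one allocation means a single t4_free_mem()
 * on adapter->tids.tid_tab suffices at teardown.
 */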
2077
2078/**
2079 * cxgb4_create_server - create an IP server
2080 * @dev: the device
2081 * @stid: the server TID
2082 * @sip: local IP address to bind server to
2083 * @sport: the server's TCP port
2084 * @queue: queue to direct messages from this server to
2085 *
2086 * Create an IP server for the given port and address.
2087 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2088 */
2089int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2090 __be32 sip, __be16 sport, unsigned int queue)
2091{
2092 unsigned int chan;
2093 struct sk_buff *skb;
2094 struct adapter *adap;
2095 struct cpl_pass_open_req *req;
2096
2097 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2098 if (!skb)
2099 return -ENOMEM;
2100
2101 adap = netdev2adap(dev);
2102 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2103 INIT_TP_WR(req, 0);
2104 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2105 req->local_port = sport;
2106 req->peer_port = htons(0);
2107 req->local_ip = sip;
2108 req->peer_ip = htonl(0);
2109 chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
2110 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2111 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2112 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2113 return t4_mgmt_tx(adap, skb);
2114}
2115EXPORT_SYMBOL(cxgb4_create_server);
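/*
 * Minimal usage sketch (hypothetical ULD code, error handling elided;
 * "listen_ctx", "local_ip", "port" and "rxq" are placeholders):
 *
 *	stid = cxgb4_alloc_stid(tids, PF_INET, listen_ctx);
 *	if (stid >= 0)
 *		err = cxgb4_create_server(dev, stid, local_ip, htons(port), rxq);
 *
 * Messages from the resulting server are then steered to the ingress queue
 * selected by "rxq" (see @queue above).
 */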
2116
2117/**
2118 * cxgb4_create_server6 - create an IPv6 server
2119 * @dev: the device
2120 * @stid: the server TID
2121 * @sip: local IPv6 address to bind server to
2122 * @sport: the server's TCP port
2123 * @queue: queue to direct messages from this server to
2124 *
2125 * Create an IPv6 server for the given port and address.
2126 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2127 */
2128int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
2129 const struct in6_addr *sip, __be16 sport,
2130 unsigned int queue)
2131{
2132 unsigned int chan;
2133 struct sk_buff *skb;
2134 struct adapter *adap;
2135 struct cpl_pass_open_req6 *req;
2136
2137 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2138 if (!skb)
2139 return -ENOMEM;
2140
2141 adap = netdev2adap(dev);
2142 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
2143 INIT_TP_WR(req, 0);
2144 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
2145 req->local_port = sport;
2146 req->peer_port = htons(0);
2147 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
2148 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
2149 req->peer_ip_hi = cpu_to_be64(0);
2150 req->peer_ip_lo = cpu_to_be64(0);
2151 chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
2152 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2153 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2154 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2155 return t4_mgmt_tx(adap, skb);
2156}
2157EXPORT_SYMBOL(cxgb4_create_server6);
2158
2159/**
2160 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2161 * @mtus: the HW MTU table
2162 * @mtu: the target MTU
2163 * @idx: index of selected entry in the MTU table
2164 *
2165 * Returns the index and the value in the HW MTU table that is closest to
2166 * but does not exceed @mtu, unless @mtu is smaller than any value in the
2167 * table, in which case that smallest available value is selected.
2168 */
2169unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2170 unsigned int *idx)
2171{
2172 unsigned int i = 0;
2173
2174 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2175 ++i;
2176 if (idx)
2177 *idx = i;
2178 return mtus[i];
2179}
2180EXPORT_SYMBOL(cxgb4_best_mtu);
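/*
 * Worked example with a hypothetical MTU table {88, 576, 1280, 1460, 1500,
 * 9000}: cxgb4_best_mtu(mtus, 1400, &idx) returns 1280 with idx == 2 (the
 * largest entry not exceeding 1400), while a request for 50 returns the
 * smallest entry, 88, with idx == 0.
 */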
2181
2182/**
2183 * cxgb4_port_chan - get the HW channel of a port
2184 * @dev: the net device for the port
2185 *
2186 * Return the HW Tx channel of the given port.
2187 */
2188unsigned int cxgb4_port_chan(const struct net_device *dev)
2189{
2190 return netdev2pinfo(dev)->tx_chan;
2191}
2192EXPORT_SYMBOL(cxgb4_port_chan);
2193
2194/**
2195 * cxgb4_port_viid - get the VI id of a port
2196 * @dev: the net device for the port
2197 *
2198 * Return the VI id of the given port.
2199 */
2200unsigned int cxgb4_port_viid(const struct net_device *dev)
2201{
2202 return netdev2pinfo(dev)->viid;
2203}
2204EXPORT_SYMBOL(cxgb4_port_viid);
2205
2206/**
2207 * cxgb4_port_idx - get the index of a port
2208 * @dev: the net device for the port
2209 *
2210 * Return the index of the given port.
2211 */
2212unsigned int cxgb4_port_idx(const struct net_device *dev)
2213{
2214 return netdev2pinfo(dev)->port_id;
2215}
2216EXPORT_SYMBOL(cxgb4_port_idx);
2217
2218/**
2219 * cxgb4_netdev_by_hwid - return the net device of a HW port
2220 * @pdev: identifies the adapter
2221 * @id: the HW port id
2222 *
2223 * Return the net device associated with the interface with the given HW
2224 * id.
2225 */
2226struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
2227{
2228 const struct adapter *adap = pci_get_drvdata(pdev);
2229
2230 if (!adap || id >= NCHAN)
2231 return NULL;
2232 id = adap->chan_map[id];
2233 return id < MAX_NPORTS ? adap->port[id] : NULL;
2234}
2235EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
2236
2237void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2238 struct tp_tcp_stats *v6)
2239{
2240 struct adapter *adap = pci_get_drvdata(pdev);
2241
2242 spin_lock(&adap->stats_lock);
2243 t4_tp_get_tcp_stats(adap, v4, v6);
2244 spin_unlock(&adap->stats_lock);
2245}
2246EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2247
2248void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2249 const unsigned int *pgsz_order)
2250{
2251 struct adapter *adap = netdev2adap(dev);
2252
2253 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2254 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2255 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2256 HPZ3(pgsz_order[3]));
2257}
2258EXPORT_SYMBOL(cxgb4_iscsi_init);
2259
2260static struct pci_driver cxgb4_driver;
2261
2262static void check_neigh_update(struct neighbour *neigh)
2263{
2264 const struct device *parent;
2265 const struct net_device *netdev = neigh->dev;
2266
2267 if (netdev->priv_flags & IFF_802_1Q_VLAN)
2268 netdev = vlan_dev_real_dev(netdev);
2269 parent = netdev->dev.parent;
2270 if (parent && parent->driver == &cxgb4_driver.driver)
2271 t4_l2t_update(dev_get_drvdata(parent), neigh);
2272}
2273
2274static int netevent_cb(struct notifier_block *nb, unsigned long event,
2275 void *data)
2276{
2277 switch (event) {
2278 case NETEVENT_NEIGH_UPDATE:
2279 check_neigh_update(data);
2280 break;
2281 case NETEVENT_PMTU_UPDATE:
2282 case NETEVENT_REDIRECT:
2283 default:
2284 break;
2285 }
2286 return 0;
2287}
2288
2289static bool netevent_registered;
2290static struct notifier_block cxgb4_netevent_nb = {
2291 .notifier_call = netevent_cb
2292};
2293
2294static void uld_attach(struct adapter *adap, unsigned int uld)
2295{
2296 void *handle;
2297 struct cxgb4_lld_info lli;
2298
2299 lli.pdev = adap->pdev;
2300 lli.l2t = adap->l2t;
2301 lli.tids = &adap->tids;
2302 lli.ports = adap->port;
2303 lli.vr = &adap->vres;
2304 lli.mtus = adap->params.mtus;
2305 if (uld == CXGB4_ULD_RDMA) {
2306 lli.rxq_ids = adap->sge.rdma_rxq;
2307 lli.nrxq = adap->sge.rdmaqs;
2308 } else if (uld == CXGB4_ULD_ISCSI) {
2309 lli.rxq_ids = adap->sge.ofld_rxq;
2310 lli.nrxq = adap->sge.ofldqsets;
2311 }
2312 lli.ntxq = adap->sge.ofldqsets;
2313 lli.nchan = adap->params.nports;
2314 lli.nports = adap->params.nports;
2315 lli.wr_cred = adap->params.ofldq_wr_cred;
2316 lli.adapter_type = adap->params.rev;
2317 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
2318 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
2319 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
2320 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
2321 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
2322 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2323 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2324 lli.fw_vers = adap->params.fw_vers;
2325
2326 handle = ulds[uld].add(&lli);
2327 if (IS_ERR(handle)) {
2328 dev_warn(adap->pdev_dev,
2329 "could not attach to the %s driver, error %ld\n",
2330 uld_str[uld], PTR_ERR(handle));
2331 return;
2332 }
2333
2334 adap->uld_handle[uld] = handle;
2335
2336 if (!netevent_registered) {
2337 register_netevent_notifier(&cxgb4_netevent_nb);
2338 netevent_registered = true;
2339 }
2340
2341 if (adap->flags & FULL_INIT_DONE)
2342 ulds[uld].state_change(handle, CXGB4_STATE_UP);
2343}
2344
2345static void attach_ulds(struct adapter *adap)
2346{
2347 unsigned int i;
2348
2349 mutex_lock(&uld_mutex);
2350 list_add_tail(&adap->list_node, &adapter_list);
2351 for (i = 0; i < CXGB4_ULD_MAX; i++)
2352 if (ulds[i].add)
2353 uld_attach(adap, i);
2354 mutex_unlock(&uld_mutex);
2355}
2356
2357static void detach_ulds(struct adapter *adap)
2358{
2359 unsigned int i;
2360
2361 mutex_lock(&uld_mutex);
2362 list_del(&adap->list_node);
2363 for (i = 0; i < CXGB4_ULD_MAX; i++)
2364 if (adap->uld_handle[i]) {
2365 ulds[i].state_change(adap->uld_handle[i],
2366 CXGB4_STATE_DETACH);
2367 adap->uld_handle[i] = NULL;
2368 }
2369 if (netevent_registered && list_empty(&adapter_list)) {
2370 unregister_netevent_notifier(&cxgb4_netevent_nb);
2371 netevent_registered = false;
2372 }
2373 mutex_unlock(&uld_mutex);
2374}
2375
2376static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2377{
2378 unsigned int i;
2379
2380 mutex_lock(&uld_mutex);
2381 for (i = 0; i < CXGB4_ULD_MAX; i++)
2382 if (adap->uld_handle[i])
2383 ulds[i].state_change(adap->uld_handle[i], new_state);
2384 mutex_unlock(&uld_mutex);
2385}
2386
2387/**
2388 * cxgb4_register_uld - register an upper-layer driver
2389 * @type: the ULD type
2390 * @p: the ULD methods
2391 *
2392 * Registers an upper-layer driver with this driver and notifies the ULD
2393 * about any presently available devices that support its type. Returns
2394 * %-EBUSY if a ULD of the same type is already registered.
2395 */
2396int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2397{
2398 int ret = 0;
2399 struct adapter *adap;
2400
2401 if (type >= CXGB4_ULD_MAX)
2402 return -EINVAL;
2403 mutex_lock(&uld_mutex);
2404 if (ulds[type].add) {
2405 ret = -EBUSY;
2406 goto out;
2407 }
2408 ulds[type] = *p;
2409 list_for_each_entry(adap, &adapter_list, list_node)
2410 uld_attach(adap, type);
2411out: mutex_unlock(&uld_mutex);
2412 return ret;
2413}
2414EXPORT_SYMBOL(cxgb4_register_uld);
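/*
 * Sketch of how an upper-layer driver might hook in (names are hypothetical;
 * the real iSCSI/RDMA ULDs supply their own callbacks):
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.add          = my_add,
 *		.state_change = my_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *
 * .add() receives the cxgb4_lld_info filled in by uld_attach() above and
 * returns the ULD's per-adapter handle (or an ERR_PTR on failure).
 */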
2415
2416/**
2417 * cxgb4_unregister_uld - unregister an upper-layer driver
2418 * @type: the ULD type
2419 *
2420 * Unregisters an existing upper-layer driver.
2421 */
2422int cxgb4_unregister_uld(enum cxgb4_uld type)
2423{
2424 struct adapter *adap;
2425
2426 if (type >= CXGB4_ULD_MAX)
2427 return -EINVAL;
2428 mutex_lock(&uld_mutex);
2429 list_for_each_entry(adap, &adapter_list, list_node)
2430 adap->uld_handle[type] = NULL;
2431 ulds[type].add = NULL;
2432 mutex_unlock(&uld_mutex);
2433 return 0;
2434}
2435EXPORT_SYMBOL(cxgb4_unregister_uld);
2436
2437/**
2438 * cxgb_up - enable the adapter
2439 * @adap: adapter being enabled
2440 *
2441 * Called when the first port is enabled, this function performs the
2442 * actions necessary to make an adapter operational, such as completing
2443 * the initialization of HW modules, and enabling interrupts.
2444 *
2445 * Must be called with the rtnl lock held.
2446 */
2447static int cxgb_up(struct adapter *adap)
2448{
2449 int err;
2450
2451 err = setup_sge_queues(adap);
2452 if (err)
2453 goto out;
2454 err = setup_rss(adap);
2455 if (err)
2456 goto freeq;
2457
2458 if (adap->flags & USING_MSIX) {
2459 name_msix_vecs(adap);
2460 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2461 adap->msix_info[0].desc, adap);
2462 if (err)
2463 goto irq_err;
2464
2465 err = request_msix_queue_irqs(adap);
2466 if (err) {
2467 free_irq(adap->msix_info[0].vec, adap);
2468 goto irq_err;
2469 }
2470 } else {
2471 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2472 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2473 adap->name, adap);
2474 if (err)
2475 goto irq_err;
2476 }
2477 enable_rx(adap);
2478 t4_sge_start(adap);
2479 t4_intr_enable(adap);
2480 adap->flags |= FULL_INIT_DONE;
2481 notify_ulds(adap, CXGB4_STATE_UP);
2482 out:
2483 return err;
2484 irq_err:
2485 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2486 freeq:
2487 t4_free_sge_resources(adap);
2488 goto out;
2489}
2490
2491static void cxgb_down(struct adapter *adapter)
2492{
2493 t4_intr_disable(adapter);
2494 cancel_work_sync(&adapter->tid_release_task);
2495 adapter->tid_release_task_busy = false;
2496 adapter->tid_release_head = NULL;
2497
2498 if (adapter->flags & USING_MSIX) {
2499 free_msix_queue_irqs(adapter);
2500 free_irq(adapter->msix_info[0].vec, adapter);
2501 } else
2502 free_irq(adapter->pdev->irq, adapter);
2503 quiesce_rx(adapter);
2504 t4_sge_stop(adapter);
2505 t4_free_sge_resources(adapter);
2506 adapter->flags &= ~FULL_INIT_DONE;
2507}
2508
2509/*
2510 * net_device operations
2511 */
2512static int cxgb_open(struct net_device *dev)
2513{
2514 int err;
2515 struct port_info *pi = netdev_priv(dev);
2516 struct adapter *adapter = pi->adapter;
2517
2518 if (!(adapter->flags & FULL_INIT_DONE)) {
2519 err = cxgb_up(adapter);
2520 if (err < 0)
2521 return err;
2522 }
2523
2524 dev->real_num_tx_queues = pi->nqsets;
2525 err = link_start(dev);
2526 if (!err)
2527 netif_tx_start_all_queues(dev);
2528 return err;
2529}
2530
2531static int cxgb_close(struct net_device *dev)
2532{
2533 struct port_info *pi = netdev_priv(dev);
2534 struct adapter *adapter = pi->adapter;
2535
2536 netif_tx_stop_all_queues(dev);
2537 netif_carrier_off(dev);
2538 return t4_enable_vi(adapter, 0, pi->viid, false, false);
2539}
2540
2541static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev)
2542{
2543 struct port_stats stats;
2544 struct port_info *p = netdev_priv(dev);
2545 struct adapter *adapter = p->adapter;
2546 struct rtnl_link_stats64 *ns = &dev->stats64;
2547
2548 spin_lock(&adapter->stats_lock);
2549 t4_get_port_stats(adapter, p->tx_chan, &stats);
2550 spin_unlock(&adapter->stats_lock);
2551
2552 ns->tx_bytes = stats.tx_octets;
2553 ns->tx_packets = stats.tx_frames;
2554 ns->rx_bytes = stats.rx_octets;
2555 ns->rx_packets = stats.rx_frames;
2556 ns->multicast = stats.rx_mcast_frames;
2557
2558 /* detailed rx_errors */
2559 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2560 stats.rx_runt;
2561 ns->rx_over_errors = 0;
2562 ns->rx_crc_errors = stats.rx_fcs_err;
2563 ns->rx_frame_errors = stats.rx_symbol_err;
2564 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
2565 stats.rx_ovflow2 + stats.rx_ovflow3 +
2566 stats.rx_trunc0 + stats.rx_trunc1 +
2567 stats.rx_trunc2 + stats.rx_trunc3;
2568 ns->rx_missed_errors = 0;
2569
2570 /* detailed tx_errors */
2571 ns->tx_aborted_errors = 0;
2572 ns->tx_carrier_errors = 0;
2573 ns->tx_fifo_errors = 0;
2574 ns->tx_heartbeat_errors = 0;
2575 ns->tx_window_errors = 0;
2576
2577 ns->tx_errors = stats.tx_error_frames;
2578 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2579 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2580 return ns;
2581}
2582
2583static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2584{
2585 int ret = 0, prtad, devad;
2586 struct port_info *pi = netdev_priv(dev);
2587 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2588
2589 switch (cmd) {
2590 case SIOCGMIIPHY:
2591 if (pi->mdio_addr < 0)
2592 return -EOPNOTSUPP;
2593 data->phy_id = pi->mdio_addr;
2594 break;
2595 case SIOCGMIIREG:
2596 case SIOCSMIIREG:
2597 if (mdio_phy_id_is_c45(data->phy_id)) {
2598 prtad = mdio_phy_id_prtad(data->phy_id);
2599 devad = mdio_phy_id_devad(data->phy_id);
2600 } else if (data->phy_id < 32) {
2601 prtad = data->phy_id;
2602 devad = 0;
2603 data->reg_num &= 0x1f;
2604 } else
2605 return -EINVAL;
2606
2607 if (cmd == SIOCGMIIREG)
2608 ret = t4_mdio_rd(pi->adapter, 0, prtad, devad,
2609 data->reg_num, &data->val_out);
2610 else
2611 ret = t4_mdio_wr(pi->adapter, 0, prtad, devad,
2612 data->reg_num, data->val_in);
2613 break;
2614 default:
2615 return -EOPNOTSUPP;
2616 }
2617 return ret;
2618}
2619
2620static void cxgb_set_rxmode(struct net_device *dev)
2621{
2622 /* unfortunately we can't return errors to the stack */
2623 set_rxmode(dev, -1, false);
2624}
2625
2626static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2627{
2628 int ret;
2629 struct port_info *pi = netdev_priv(dev);
2630
2631 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
2632 return -EINVAL;
2633 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, -1,
2634 true);
2635 if (!ret)
2636 dev->mtu = new_mtu;
2637 return ret;
2638}
2639
2640static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2641{
2642 int ret;
2643 struct sockaddr *addr = p;
2644 struct port_info *pi = netdev_priv(dev);
2645
2646 if (!is_valid_ether_addr(addr->sa_data))
2647 return -EINVAL;
2648
2649 ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt,
2650 addr->sa_data, true, true);
2651 if (ret < 0)
2652 return ret;
2653
2654 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2655 pi->xact_addr_filt = ret;
2656 return 0;
2657}
2658
2659static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2660{
2661 struct port_info *pi = netdev_priv(dev);
2662
2663 pi->vlan_grp = grp;
2664 t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1, grp != NULL,
2665 true);
2666}
2667
2668#ifdef CONFIG_NET_POLL_CONTROLLER
2669static void cxgb_netpoll(struct net_device *dev)
2670{
2671 struct port_info *pi = netdev_priv(dev);
2672 struct adapter *adap = pi->adapter;
2673
2674 if (adap->flags & USING_MSIX) {
2675 int i;
2676 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2677
2678 for (i = pi->nqsets; i; i--, rx++)
2679 t4_sge_intr_msix(0, &rx->rspq);
2680 } else
2681 t4_intr_handler(adap)(0, adap);
2682}
2683#endif
2684
2685static const struct net_device_ops cxgb4_netdev_ops = {
2686 .ndo_open = cxgb_open,
2687 .ndo_stop = cxgb_close,
2688 .ndo_start_xmit = t4_eth_xmit,
2689 .ndo_get_stats64 = cxgb_get_stats,
2690 .ndo_set_rx_mode = cxgb_set_rxmode,
2691 .ndo_set_mac_address = cxgb_set_mac_addr,
2692 .ndo_validate_addr = eth_validate_addr,
2693 .ndo_do_ioctl = cxgb_ioctl,
2694 .ndo_change_mtu = cxgb_change_mtu,
2695 .ndo_vlan_rx_register = vlan_rx_register,
2696#ifdef CONFIG_NET_POLL_CONTROLLER
2697 .ndo_poll_controller = cxgb_netpoll,
2698#endif
2699};
2700
2701void t4_fatal_err(struct adapter *adap)
2702{
2703 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
2704 t4_intr_disable(adap);
2705 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
2706}
2707
2708static void setup_memwin(struct adapter *adap)
2709{
2710 u32 bar0;
2711
2712 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
2713 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
2714 (bar0 + MEMWIN0_BASE) | BIR(0) |
2715 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
2716 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
2717 (bar0 + MEMWIN1_BASE) | BIR(0) |
2718 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
2719 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
2720 (bar0 + MEMWIN2_BASE) | BIR(0) |
2721 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
2722}
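/*
 * The WINDOW() field appears to encode each aperture as a power of two in
 * units of 1KB: ilog2(MEMWIN0_APERTURE) - 10 = 16 - 10 = 6, i.e.
 * 2^(6 + 10) = 64KB, and likewise 5 for the 32KB window and 1 for the 2KB
 * window.
 */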
2723
2724static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
2725{
2726 u32 v;
2727 int ret;
2728
2729 /* get device capabilities */
2730 memset(c, 0, sizeof(*c));
2731 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2732 FW_CMD_REQUEST | FW_CMD_READ);
2733 c->retval_len16 = htonl(FW_LEN16(*c));
2734 ret = t4_wr_mbox(adap, 0, c, sizeof(*c), c);
2735 if (ret < 0)
2736 return ret;
2737
2738 /* select capabilities we'll be using */
2739 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
2740 if (!vf_acls)
2741 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
2742 else
2743 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
2744 } else if (vf_acls) {
2745 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
2746 return ret;
2747 }
2748 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2749 FW_CMD_REQUEST | FW_CMD_WRITE);
2750 ret = t4_wr_mbox(adap, 0, c, sizeof(*c), NULL);
2751 if (ret < 0)
2752 return ret;
2753
2754 ret = t4_config_glbl_rss(adap, 0,
2755 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
2756 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
2757 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
2758 if (ret < 0)
2759 return ret;
2760
2761 ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
2762 FW_CMD_CAP_PF, FW_CMD_CAP_PF);
2763 if (ret < 0)
2764 return ret;
2765
2766 t4_sge_init(adap);
2767
2768 /* get basic stuff going */
2769 ret = t4_early_init(adap, 0);
2770 if (ret < 0)
2771 return ret;
2772
2773 /* tweak some settings */
2774 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
2775 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
2776 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
2777 v = t4_read_reg(adap, TP_PIO_DATA);
2778 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
2779 setup_memwin(adap);
2780 return 0;
2781}
2782
2783/*
2784 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
2785 */
2786#define MAX_ATIDS 8192U
2787
2788/*
2789 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
2790 */
2791static int adap_init0(struct adapter *adap)
2792{
2793 int ret;
2794 u32 v, port_vec;
2795 enum dev_state state;
2796 u32 params[7], val[7];
2797 struct fw_caps_config_cmd c;
2798
2799 ret = t4_check_fw_version(adap);
2800 if (ret == -EINVAL || ret > 0) {
2801 if (upgrade_fw(adap) >= 0) /* recache FW version */
2802 ret = t4_check_fw_version(adap);
2803 }
2804 if (ret < 0)
2805 return ret;
2806
2807 /* contact FW, request master */
2808 ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state);
2809 if (ret < 0) {
2810 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
2811 ret);
2812 return ret;
2813 }
2814
2815 /* reset device */
2816 ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST);
2817 if (ret < 0)
2818 goto bye;
2819
2820 for (v = 0; v < SGE_NTIMERS - 1; v++)
2821 adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
2822 adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
2823 adap->sge.counter_val[0] = 1;
2824 for (v = 1; v < SGE_NCOUNTERS; v++)
2825 adap->sge.counter_val[v] = min(intr_cnt[v - 1],
2826 THRESHOLD_3_MASK);
2827#define FW_PARAM_DEV(param) \
2828 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2829 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2830
2831 params[0] = FW_PARAM_DEV(CCLK);
2832 ret = t4_query_params(adap, 0, 0, 0, 1, params, val);
2833 if (ret < 0)
2834 goto bye;
2835 adap->params.vpd.cclk = val[0];
2836
2837 ret = adap_init1(adap, &c);
2838 if (ret < 0)
2839 goto bye;
2840
2841#define FW_PARAM_PFVF(param) \
2842 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2843 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2844
2845 params[0] = FW_PARAM_DEV(PORTVEC);
2846 params[1] = FW_PARAM_PFVF(L2T_START);
2847 params[2] = FW_PARAM_PFVF(L2T_END);
2848 params[3] = FW_PARAM_PFVF(FILTER_START);
2849 params[4] = FW_PARAM_PFVF(FILTER_END);
2850 ret = t4_query_params(adap, 0, 0, 0, 5, params, val);
2851 if (ret < 0)
2852 goto bye;
2853 port_vec = val[0];
2854 adap->tids.ftid_base = val[3];
2855 adap->tids.nftids = val[4] - val[3] + 1;
2856
2857 if (c.ofldcaps) {
2858 /* query offload-related parameters */
2859 params[0] = FW_PARAM_DEV(NTID);
2860 params[1] = FW_PARAM_PFVF(SERVER_START);
2861 params[2] = FW_PARAM_PFVF(SERVER_END);
2862 params[3] = FW_PARAM_PFVF(TDDP_START);
2863 params[4] = FW_PARAM_PFVF(TDDP_END);
2864 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2865 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
2866 if (ret < 0)
2867 goto bye;
2868 adap->tids.ntids = val[0];
2869 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
2870 adap->tids.stid_base = val[1];
2871 adap->tids.nstids = val[2] - val[1] + 1;
2872 adap->vres.ddp.start = val[3];
2873 adap->vres.ddp.size = val[4] - val[3] + 1;
2874 adap->params.ofldq_wr_cred = val[5];
2875 adap->params.offload = 1;
2876 }
2877 if (c.rdmacaps) {
2878 params[0] = FW_PARAM_PFVF(STAG_START);
2879 params[1] = FW_PARAM_PFVF(STAG_END);
2880 params[2] = FW_PARAM_PFVF(RQ_START);
2881 params[3] = FW_PARAM_PFVF(RQ_END);
2882 params[4] = FW_PARAM_PFVF(PBL_START);
2883 params[5] = FW_PARAM_PFVF(PBL_END);
2884 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
2885 if (ret < 0)
2886 goto bye;
2887 adap->vres.stag.start = val[0];
2888 adap->vres.stag.size = val[1] - val[0] + 1;
2889 adap->vres.rq.start = val[2];
2890 adap->vres.rq.size = val[3] - val[2] + 1;
2891 adap->vres.pbl.start = val[4];
2892 adap->vres.pbl.size = val[5] - val[4] + 1;
2893
2894 params[0] = FW_PARAM_PFVF(SQRQ_START);
2895 params[1] = FW_PARAM_PFVF(SQRQ_END);
2896 params[2] = FW_PARAM_PFVF(CQ_START);
2897 params[3] = FW_PARAM_PFVF(CQ_END);
2898 ret = t4_query_params(adap, 0, 0, 0, 4, params, val);
2899 if (ret < 0)
2900 goto bye;
2901 adap->vres.qp.start = val[0];
2902 adap->vres.qp.size = val[1] - val[0] + 1;
2903 adap->vres.cq.start = val[2];
2904 adap->vres.cq.size = val[3] - val[2] + 1;
2905 }
2906 if (c.iscsicaps) {
2907 params[0] = FW_PARAM_PFVF(ISCSI_START);
2908 params[1] = FW_PARAM_PFVF(ISCSI_END);
2909 ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
2910 if (ret < 0)
2911 goto bye;
2912 adap->vres.iscsi.start = val[0];
2913 adap->vres.iscsi.size = val[1] - val[0] + 1;
2914 }
2915#undef FW_PARAM_PFVF
2916#undef FW_PARAM_DEV
2917
2918 adap->params.nports = hweight32(port_vec);
2919 adap->params.portvec = port_vec;
2920 adap->flags |= FW_OK;
2921
2922 /* These are finalized by FW initialization, load their values now */
2923 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
2924 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
2925 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
2926 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
2927 adap->params.b_wnd);
2928 return 0;
2929
2930 /*
2931 * If a command timed out or failed with EIO, the FW is not operating
2932 * within its spec or something catastrophic happened to the HW/FW;
2933 * stop issuing commands.
2934 */
2935bye: if (ret != -ETIMEDOUT && ret != -EIO)
2936 t4_fw_bye(adap, 0);
2937 return ret;
2938}
2939
2940/* EEH callbacks */
2941
2942static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
2943 pci_channel_state_t state)
2944{
2945 int i;
2946 struct adapter *adap = pci_get_drvdata(pdev);
2947
2948 if (!adap)
2949 goto out;
2950
2951 rtnl_lock();
2952 adap->flags &= ~FW_OK;
2953 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
2954 for_each_port(adap, i) {
2955 struct net_device *dev = adap->port[i];
2956
2957 netif_device_detach(dev);
2958 netif_carrier_off(dev);
2959 }
2960 if (adap->flags & FULL_INIT_DONE)
2961 cxgb_down(adap);
2962 rtnl_unlock();
2963 pci_disable_device(pdev);
2964out: return state == pci_channel_io_perm_failure ?
2965 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
2966}
2967
2968static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
2969{
2970 int i, ret;
2971 struct fw_caps_config_cmd c;
2972 struct adapter *adap = pci_get_drvdata(pdev);
2973
2974 if (!adap) {
2975 pci_restore_state(pdev);
2976 pci_save_state(pdev);
2977 return PCI_ERS_RESULT_RECOVERED;
2978 }
2979
2980 if (pci_enable_device(pdev)) {
2981 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
2982 return PCI_ERS_RESULT_DISCONNECT;
2983 }
2984
2985 pci_set_master(pdev);
2986 pci_restore_state(pdev);
2987 pci_save_state(pdev);
2988 pci_cleanup_aer_uncorrect_error_status(pdev);
2989
2990 if (t4_wait_dev_ready(adap) < 0)
2991 return PCI_ERS_RESULT_DISCONNECT;
2992 if (t4_fw_hello(adap, 0, 0, MASTER_MUST, NULL))
2993 return PCI_ERS_RESULT_DISCONNECT;
2994 adap->flags |= FW_OK;
2995 if (adap_init1(adap, &c))
2996 return PCI_ERS_RESULT_DISCONNECT;
2997
2998 for_each_port(adap, i) {
2999 struct port_info *p = adap2pinfo(adap, i);
3000
3001 ret = t4_alloc_vi(adap, 0, p->tx_chan, 0, 0, 1, NULL, NULL);
3002 if (ret < 0)
3003 return PCI_ERS_RESULT_DISCONNECT;
3004 p->viid = ret;
3005 p->xact_addr_filt = -1;
3006 }
3007
3008 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3009 adap->params.b_wnd);
3010 if (cxgb_up(adap))
3011 return PCI_ERS_RESULT_DISCONNECT;
3012 return PCI_ERS_RESULT_RECOVERED;
3013}
3014
3015static void eeh_resume(struct pci_dev *pdev)
3016{
3017 int i;
3018 struct adapter *adap = pci_get_drvdata(pdev);
3019
3020 if (!adap)
3021 return;
3022
3023 rtnl_lock();
3024 for_each_port(adap, i) {
3025 struct net_device *dev = adap->port[i];
3026
3027 if (netif_running(dev)) {
3028 link_start(dev);
3029 cxgb_set_rxmode(dev);
3030 }
3031 netif_device_attach(dev);
3032 }
3033 rtnl_unlock();
3034}
3035
3036static struct pci_error_handlers cxgb4_eeh = {
3037 .error_detected = eeh_err_detected,
3038 .slot_reset = eeh_slot_reset,
3039 .resume = eeh_resume,
3040};
3041
3042static inline bool is_10g_port(const struct link_config *lc)
3043{
3044 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
3045}
3046
3047static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
3048 unsigned int size, unsigned int iqe_size)
3049{
3050 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
3051 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
3052 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
3053 q->iqe_len = iqe_size;
3054 q->size = size;
3055}
3056
3057/*
3058 * Perform default configuration of DMA queues depending on the number and type
3059 * of ports we found and the number of available CPUs. Most settings can be
3060 * modified by the admin prior to actual use.
3061 */
3062static void __devinit cfg_queues(struct adapter *adap)
3063{
3064 struct sge *s = &adap->sge;
3065 int i, q10g = 0, n10g = 0, qidx = 0;
3066
3067 for_each_port(adap, i)
3068 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
3069
3070 /*
3071 * We default to 1 queue set per non-10G port and, for each 10G port,
3072 * up to as many queue sets as there are online CPU cores.
3073 */
3074 if (n10g)
3075 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
3076 if (q10g > num_online_cpus())
3077 q10g = num_online_cpus();
3078
3079 for_each_port(adap, i) {
3080 struct port_info *pi = adap2pinfo(adap, i);
3081
3082 pi->first_qset = qidx;
3083 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
3084 qidx += pi->nqsets;
3085 }
3086
3087 s->ethqsets = qidx;
3088 s->max_ethqsets = qidx; /* MSI-X may lower it later */
3089
3090 if (is_offload(adap)) {
3091 /*
3092 * For offload we use 1 queue/channel if all ports are up to 1G;
3093 * otherwise we divide all available queues amongst the channels,
3094 * capped by the number of available cores.
3095 */
3096 if (n10g) {
3097 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
3098 num_online_cpus());
3099 s->ofldqsets = roundup(i, adap->params.nports);
3100 } else
3101 s->ofldqsets = adap->params.nports;
3102 /* For RDMA one Rx queue per channel suffices */
3103 s->rdmaqs = adap->params.nports;
3104 }
3105
3106 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
3107 struct sge_eth_rxq *r = &s->ethrxq[i];
3108
3109 init_rspq(&r->rspq, 0, 0, 1024, 64);
3110 r->fl.size = 72;
3111 }
3112
3113 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
3114 s->ethtxq[i].q.size = 1024;
3115
3116 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
3117 s->ctrlq[i].q.size = 512;
3118
3119 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
3120 s->ofldtxq[i].q.size = 1024;
3121
3122 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
3123 struct sge_ofld_rxq *r = &s->ofldrxq[i];
3124
3125 init_rspq(&r->rspq, 0, 0, 1024, 64);
3126 r->rspq.uld = CXGB4_ULD_ISCSI;
3127 r->fl.size = 72;
3128 }
3129
3130 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
3131 struct sge_ofld_rxq *r = &s->rdmarxq[i];
3132
3133 init_rspq(&r->rspq, 0, 0, 511, 64);
3134 r->rspq.uld = CXGB4_ULD_RDMA;
3135 r->fl.size = 72;
3136 }
3137
3138 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
3139 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
3140}
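/*
 * Worked example of the distribution above, assuming MAX_ETH_QSETS is 32:
 * a two-port adapter with both ports at 10G and 8 online CPUs gives
 * q10g = 32 / 2 = 16, capped to 8, so each port gets 8 Ethernet queue sets
 * and s->ethqsets ends up at 16.  With offload enabled, ofldqsets becomes
 * min(ARRAY_SIZE(ofldrxq), 8) rounded up to a multiple of nports, and
 * rdmaqs is 2 (one Rx queue per channel).
 */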
3141
3142/*
3143 * Reduce the number of Ethernet queues across all ports to at most n.
3144 * n provides at least one queue per port.
3145 */
3146static void __devinit reduce_ethqs(struct adapter *adap, int n)
3147{
3148 int i;
3149 struct port_info *pi;
3150
3151 while (n < adap->sge.ethqsets)
3152 for_each_port(adap, i) {
3153 pi = adap2pinfo(adap, i);
3154 if (pi->nqsets > 1) {
3155 pi->nqsets--;
3156 adap->sge.ethqsets--;
3157 if (adap->sge.ethqsets <= n)
3158 break;
3159 }
3160 }
3161
3162 n = 0;
3163 for_each_port(adap, i) {
3164 pi = adap2pinfo(adap, i);
3165 pi->first_qset = n;
3166 n += pi->nqsets;
3167 }
3168}
3169
3170/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
3171#define EXTRA_VECS 2
3172
3173static int __devinit enable_msix(struct adapter *adap)
3174{
3175 int ofld_need = 0;
3176 int i, err, want, need;
3177 struct sge *s = &adap->sge;
3178 unsigned int nchan = adap->params.nports;
3179 struct msix_entry entries[MAX_INGQ + 1];
3180
3181 for (i = 0; i < ARRAY_SIZE(entries); ++i)
3182 entries[i].entry = i;
3183
3184 want = s->max_ethqsets + EXTRA_VECS;
3185 if (is_offload(adap)) {
3186 want += s->rdmaqs + s->ofldqsets;
3187 /* need nchan for each possible ULD */
3188 ofld_need = 2 * nchan;
3189 }
3190 need = adap->params.nports + EXTRA_VECS + ofld_need;
3191
3192 while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
3193 want = err;
3194
3195 if (!err) {
3196 /*
3197 * Distribute available vectors to the various queue groups.
3198 * Every group gets its minimum requirement and the NIC gets top
3199 * priority for leftovers.
3200 */
3201 i = want - EXTRA_VECS - ofld_need;
3202 if (i < s->max_ethqsets) {
3203 s->max_ethqsets = i;
3204 if (i < s->ethqsets)
3205 reduce_ethqs(adap, i);
3206 }
3207 if (is_offload(adap)) {
3208 i = want - EXTRA_VECS - s->max_ethqsets;
3209 i -= ofld_need - nchan;
3210 s->ofldqsets = (i / nchan) * nchan; /* round down */
3211 }
3212 for (i = 0; i < want; ++i)
3213 adap->msix_info[i].vec = entries[i].vector;
3214 } else if (err > 0)
3215 dev_info(adap->pdev_dev,
3216 "only %d MSI-X vectors left, not using MSI-X\n", err);
3217 return err;
3218}
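/*
 * Illustrative vector accounting for the function above: a two-port,
 * offload-capable adapter with max_ethqsets = 16, rdmaqs = 2 and
 * ofldqsets = 2 asks for want = 16 + 2 + 2 + 2 = 22 vectors but can get by
 * with need = 2 + 2 + 2 * 2 = 8.  If pci_enable_msix() reports that only,
 * say, 12 vectors are available, the request is retried with want = 12 and
 * the Ethernet queue sets absorb the shortfall via reduce_ethqs().
 */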
3219
3220#undef EXTRA_VECS
3221
3222static void __devinit print_port_info(struct adapter *adap)
3223{
3224 static const char *base[] = {
3225 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
3226 "KX", "KR", "KR SFP+", "KR FEC"
3227 };
3228
3229 int i;
3230 char buf[80];
3231 const char *spd = "";
3232
3233 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
3234 spd = " 2.5 GT/s";
3235 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
3236 spd = " 5 GT/s";
3237
3238 for_each_port(adap, i) {
3239 struct net_device *dev = adap->port[i];
3240 const struct port_info *pi = netdev_priv(dev);
3241 char *bufp = buf;
3242
3243 if (!test_bit(i, &adap->registered_device_map))
3244 continue;
3245
3246 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
3247 bufp += sprintf(bufp, "100/");
3248 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
3249 bufp += sprintf(bufp, "1000/");
3250 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
3251 bufp += sprintf(bufp, "10G/");
3252 if (bufp != buf)
3253 --bufp;
3254 sprintf(bufp, "BASE-%s", base[pi->port_type]);
3255
3256 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
3257 adap->params.vpd.id, adap->params.rev,
3258 buf, is_offload(adap) ? "R" : "",
3259 adap->params.pci.width, spd,
3260 (adap->flags & USING_MSIX) ? " MSI-X" :
3261 (adap->flags & USING_MSI) ? " MSI" : "");
3262 if (adap->name == dev->name)
3263 netdev_info(dev, "S/N: %s, E/C: %s\n",
3264 adap->params.vpd.sn, adap->params.vpd.ec);
3265 }
3266}
3267
3268#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\
3269 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3270
3271static int __devinit init_one(struct pci_dev *pdev,
3272 const struct pci_device_id *ent)
3273{
3274 int func, i, err;
3275 struct port_info *pi;
3276 unsigned int highdma = 0;
3277 struct adapter *adapter = NULL;
3278
3279 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3280
3281 err = pci_request_regions(pdev, KBUILD_MODNAME);
3282 if (err) {
3283 /* Just info, some other driver may have claimed the device. */
3284 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3285 return err;
3286 }
3287
3288 /* We control everything through PF 0 */
3289 func = PCI_FUNC(pdev->devfn);
3290 if (func > 0) {
3291 pci_save_state(pdev); /* to restore SR-IOV later */
3292 goto sriov;
3293 }
3294
3295 err = pci_enable_device(pdev);
3296 if (err) {
3297 dev_err(&pdev->dev, "cannot enable PCI device\n");
3298 goto out_release_regions;
3299 }
3300
3301 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3302 highdma = NETIF_F_HIGHDMA;
3303 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3304 if (err) {
3305 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3306 "coherent allocations\n");
3307 goto out_disable_device;
3308 }
3309 } else {
3310 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3311 if (err) {
3312 dev_err(&pdev->dev, "no usable DMA configuration\n");
3313 goto out_disable_device;
3314 }
3315 }
3316
3317 pci_enable_pcie_error_reporting(pdev);
3318 pci_set_master(pdev);
3319 pci_save_state(pdev);
3320
3321 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3322 if (!adapter) {
3323 err = -ENOMEM;
3324 goto out_disable_device;
3325 }
3326
3327 adapter->regs = pci_ioremap_bar(pdev, 0);
3328 if (!adapter->regs) {
3329 dev_err(&pdev->dev, "cannot map device registers\n");
3330 err = -ENOMEM;
3331 goto out_free_adapter;
3332 }
3333
3334 adapter->pdev = pdev;
3335 adapter->pdev_dev = &pdev->dev;
3336 adapter->name = pci_name(pdev);
3337 adapter->msg_enable = dflt_msg_enable;
3338 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
3339
3340 spin_lock_init(&adapter->stats_lock);
3341 spin_lock_init(&adapter->tid_release_lock);
3342
3343 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
3344
3345 err = t4_prep_adapter(adapter);
3346 if (err)
3347 goto out_unmap_bar;
3348 err = adap_init0(adapter);
3349 if (err)
3350 goto out_unmap_bar;
3351
3352 for_each_port(adapter, i) {
3353 struct net_device *netdev;
3354
3355 netdev = alloc_etherdev_mq(sizeof(struct port_info),
3356 MAX_ETH_QSETS);
3357 if (!netdev) {
3358 err = -ENOMEM;
3359 goto out_free_dev;
3360 }
3361
3362 SET_NETDEV_DEV(netdev, &pdev->dev);
3363
3364 adapter->port[i] = netdev;
3365 pi = netdev_priv(netdev);
3366 pi->adapter = adapter;
3367 pi->xact_addr_filt = -1;
3368 pi->rx_offload = RX_CSO;
3369 pi->port_id = i;
3370 netif_carrier_off(netdev);
3371 netif_tx_stop_all_queues(netdev);
3372 netdev->irq = pdev->irq;
3373
3374 netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
3375 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3376 netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
3377 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3378 netdev->vlan_features = netdev->features & VLAN_FEAT;
3379
3380 netdev->netdev_ops = &cxgb4_netdev_ops;
3381 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3382 }
3383
3384 pci_set_drvdata(pdev, adapter);
3385
3386 if (adapter->flags & FW_OK) {
3387 err = t4_port_init(adapter, 0, 0, 0);
3388 if (err)
3389 goto out_free_dev;
3390 }
3391
3392 /*
3393 * Configure queues and allocate tables now; they can be needed as
3394 * soon as the first register_netdev completes.
3395 */
3396 cfg_queues(adapter);
3397
3398 adapter->l2t = t4_init_l2t();
3399 if (!adapter->l2t) {
3400 /* We tolerate a lack of L2T, giving up some functionality */
3401 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
3402 adapter->params.offload = 0;
3403 }
3404
3405 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
3406 dev_warn(&pdev->dev, "could not allocate TID table, "
3407 "continuing\n");
3408 adapter->params.offload = 0;
3409 }
3410
3411 /*
3412 * The card is now ready to go. If any errors occur during device
3413 * registration we do not fail the whole card but rather proceed only
3414 * with the ports we manage to register successfully. However we must
3415 * register at least one net device.
3416 */
3417 for_each_port(adapter, i) {
3418 err = register_netdev(adapter->port[i]);
3419 if (err)
3420 dev_warn(&pdev->dev,
3421 "cannot register net device %s, skipping\n",
3422 adapter->port[i]->name);
3423 else {
3424 /*
3425 * Change the name we use for messages to the name of
3426 * the first successfully registered interface.
3427 */
3428 if (!adapter->registered_device_map)
3429 adapter->name = adapter->port[i]->name;
3430
3431 __set_bit(i, &adapter->registered_device_map);
3432 adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
3433 }
3434 }
3435 if (!adapter->registered_device_map) {
3436 dev_err(&pdev->dev, "could not register any net devices\n");
3437 goto out_free_dev;
3438 }
3439
3440 if (cxgb4_debugfs_root) {
3441 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
3442 cxgb4_debugfs_root);
3443 setup_debugfs(adapter);
3444 }
3445
3446 /* See what interrupts we'll be using */
3447 if (msi > 1 && enable_msix(adapter) == 0)
3448 adapter->flags |= USING_MSIX;
3449 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3450 adapter->flags |= USING_MSI;
3451
3452 if (is_offload(adapter))
3453 attach_ulds(adapter);
3454
3455 print_port_info(adapter);
3456
3457sriov:
3458#ifdef CONFIG_PCI_IOV
3459 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
3460 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
3461 dev_info(&pdev->dev,
3462 "instantiated %u virtual functions\n",
3463 num_vf[func]);
3464#endif
3465 return 0;
3466
3467 out_free_dev:
3468 t4_free_mem(adapter->tids.tid_tab);
3469 t4_free_mem(adapter->l2t);
3470 for_each_port(adapter, i)
3471 if (adapter->port[i])
3472 free_netdev(adapter->port[i]);
3473 if (adapter->flags & FW_OK)
3474 t4_fw_bye(adapter, 0);
3475 out_unmap_bar:
3476 iounmap(adapter->regs);
3477 out_free_adapter:
3478 kfree(adapter);
3479 out_disable_device:
3480 pci_disable_pcie_error_reporting(pdev);
3481 pci_disable_device(pdev);
3482 out_release_regions:
3483 pci_release_regions(pdev);
3484 pci_set_drvdata(pdev, NULL);
3485 return err;
3486}
3487
3488static void __devexit remove_one(struct pci_dev *pdev)
3489{
3490 struct adapter *adapter = pci_get_drvdata(pdev);
3491
3492 pci_disable_sriov(pdev);
3493
3494 if (adapter) {
3495 int i;
3496
3497 if (is_offload(adapter))
3498 detach_ulds(adapter);
3499
3500 for_each_port(adapter, i)
3501 if (test_bit(i, &adapter->registered_device_map))
3502 unregister_netdev(adapter->port[i]);
3503
3504 if (adapter->debugfs_root)
3505 debugfs_remove_recursive(adapter->debugfs_root);
3506
3507 if (adapter->flags & FULL_INIT_DONE)
3508 cxgb_down(adapter);
3509 t4_free_mem(adapter->l2t);
3510 t4_free_mem(adapter->tids.tid_tab);
3511 disable_msi(adapter);
3512
3513 for_each_port(adapter, i)
3514 if (adapter->port[i])
3515 free_netdev(adapter->port[i]);
3516
3517 if (adapter->flags & FW_OK)
3518 t4_fw_bye(adapter, 0);
3519 iounmap(adapter->regs);
3520 kfree(adapter);
3521 pci_disable_pcie_error_reporting(pdev);
3522 pci_disable_device(pdev);
3523 pci_release_regions(pdev);
3524 pci_set_drvdata(pdev, NULL);
3525 } else if (PCI_FUNC(pdev->devfn) > 0)
3526 pci_release_regions(pdev);
3527}
3528
3529static struct pci_driver cxgb4_driver = {
3530 .name = KBUILD_MODNAME,
3531 .id_table = cxgb4_pci_tbl,
3532 .probe = init_one,
3533 .remove = __devexit_p(remove_one),
3534 .err_handler = &cxgb4_eeh,
3535};
3536
3537static int __init cxgb4_init_module(void)
3538{
3539 int ret;
3540
3541 /* Debugfs support is optional, just warn if this fails */
3542 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3543 if (!cxgb4_debugfs_root)
3544 pr_warning("could not create debugfs entry, continuing\n");
3545
3546 ret = pci_register_driver(&cxgb4_driver);
3547 if (ret < 0)
3548 debugfs_remove(cxgb4_debugfs_root);
3549 return ret;
3550}
3551
3552static void __exit cxgb4_cleanup_module(void)
3553{
3554 pci_unregister_driver(&cxgb4_driver);
3555 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
3556}
3557
3558module_init(cxgb4_init_module);
3559module_exit(cxgb4_cleanup_module);