/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#define DRV_VERSION "1.3.0-ko"
#define DRV_DESC "Chelsio T4 Network Driver"

/*
 * Max interrupt hold-off timer value in us. Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

#ifdef CONFIG_PCI_IOV
/*
 * Virtual Function provisioning constants. We need two extra Ingress Queues
 * with Interrupt capability to serve as the VF's Firmware Event Queue and
 * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
 * Lists associated with them. For each Ethernet/Control Egress Queue and
 * for each Free List, we need an Egress Context.
 */
enum {
	VFRES_NPORTS = 1,		/* # of "ports" per VF */
	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */

	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
	VFRES_TC = 0,			/* PCI-E traffic class */
	VFRES_NEXACTF = 16,		/* # of exact MPS filters */

	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
};
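
/*
 * A worked reading of the constants above: with VFRES_NQSETS = 2, two
 * Ethernet/Control Egress Queues plus two Free Lists give VFRES_NEQ = 4
 * Egress Contexts, and two Queue Sets plus the Firmware Event Queue and
 * the Forwarded Interrupt Queue give VFRES_NIQFLINT = 4 interrupt-capable
 * Ingress Queues.
 */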

/*
 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
 * static and likely not to be useful in the long run. We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
{
	unsigned int portn, portvec;

	/*
	 * Give PFs access to all of the ports.
	 */
	if (vf == 0)
		return FW_PFVF_CMD_PMASK_MASK;

	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF. We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet so we have to compute this.
	 */
	if (adapter->params.nports == 0)
		return 0;

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;
	for (;;) {
		/*
		 * Isolate the lowest set bit in the port vector. If we're at
		 * the port number that we want, return that as the pmask;
		 * otherwise mask that bit out of the port vector and
		 * decrement our port number ...
		 */
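		/* E.g., portvec = 0b1010: portvec & (portvec - 1) = 0b1000,
		 * and 0b1010 ^ 0b1000 = 0b0010, the lowest set bit.
		 */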
		unsigned int pmask = portvec ^ (portvec & (portvec-1));
		if (portn == 0)
			return pmask;
		portn--;
		portvec &= ~pmask;
	}
	/*NOTREACHED*/
}
#endif

enum {
	MEMWIN0_APERTURE = 65536,
	MEMWIN0_BASE     = 0x30000,
	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,
	MEMWIN2_APERTURE = 2048,
	MEMWIN2_BASE     = 0x1b800,
};

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
	CH_DEVICE(0xa000, 0),  /* PE10K */
	CH_DEVICE(0x4001, 0),
	CH_DEVICE(0x4002, 0),
	CH_DEVICE(0x4003, 0),
	CH_DEVICE(0x4004, 0),
	CH_DEVICE(0x4005, 0),
	CH_DEVICE(0x4006, 0),
	CH_DEVICE(0x4007, 0),
	CH_DEVICE(0x4008, 0),
	CH_DEVICE(0x4009, 0),
	CH_DEVICE(0x400a, 0),
	{ 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values. Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");

static int vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

static unsigned int num_vf[4];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

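	/*
	 * Addresses are handed to the hardware in batches of up to
	 * ARRAY_SIZE(addr) (7) exact-match filter entries; any address the
	 * firmware can't place in an exact filter is accumulated into the
	 * uhash/mhash bits and programmed via t4_set_addr_hash() at the end.
	 */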
	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    pi->vlan_grp != NULL, true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
	return ret;
}

/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;	/* skip RSS header */
	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq = q->adap->sge.egr_map[qid];

		txq->restarts++;
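		/*
		 * The address comparison below presumably relies on struct
		 * sge's field layout: the Ethernet Tx queues sit below
		 * ethrxq[] in memory and the offload Tx queues above it.
		 */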
		if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	return 0;
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD. All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	if (v & PFSW) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
	adap->msix_info[1].desc[n] = 0;

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i) {
		snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
			 adap->name, i);
		adap->msix_info[msi_idx++].desc[n] = 0;
	}
	for_each_rdmarxq(&adap->sge, i) {
		snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
			 adap->name, i);
		adap->msix_info[msi_idx++].desc[n] = 0;
	}
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
				  adap->msix_info[msi].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
				  adap->msix_info[msi].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
				  adap->msix_info[msi].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi++;
	}
	return 0;

unwind:
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
}

/**
 *	write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;	/* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}
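	/*
	 * A negative msi_idx here encodes -(intrq.abs_id + 1): in INTx/MSI
	 * mode the queues allocated below forward their interrupts to the
	 * interrupt queue set up above instead of owning a dedicated vector.
	 */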

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}

/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	u32 vers;
	const struct fw_hdr *hdr;
	const struct firmware *fw;
	struct device *dev = adap->pdev_dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "unable to load firmware image " FW_FNAME
			", error %d\n", ret);
		return ret;
	}

	hdr = (const struct fw_hdr *)fw->data;
	vers = ntohl(hdr->fw_ver);
	if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
		ret = -EINVAL;	/* wrong major version, won't do */
		goto out;
	}

	/*
	 * If the flash FW is unusable or we found something newer, load it.
	 */
	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
	    vers > adap->params.fw_vers) {
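		/*
		 * t4_load_fw() returns a negative errno on failure; negating
		 * it yields the positive "load started but failed" value
		 * promised in the comment above this function.
		 */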
		ret = -t4_load_fw(adap, fw->data, fw->size);
		if (!ret)
			dev_info(dev, "firmware upgraded to version %pI4 from "
				 FW_FNAME "\n", &hdr->fw_ver);
	}
out:	release_firmware(fw);
	return ret;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL);

	if (!p)
		p = vmalloc(size);
	if (p)
		memset(p, 0, size);
	return p;
}

/*
 * Free memory allocated through alloc_mem().
 */
void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK ",
	"TxFramesOK ",
	"TxBroadcastFrames ",
	"TxMulticastFrames ",
	"TxUnicastFrames ",
	"TxErrorFrames ",

	"TxFrames64 ",
	"TxFrames65To127 ",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",

	"TxFramesDropped ",
	"TxPauseFrames ",
	"TxPPP0Frames ",
	"TxPPP1Frames ",
	"TxPPP2Frames ",
	"TxPPP3Frames ",
	"TxPPP4Frames ",
	"TxPPP5Frames ",
	"TxPPP6Frames ",
	"TxPPP7Frames ",

	"RxOctetsOK ",
	"RxFramesOK ",
	"RxBroadcastFrames ",
	"RxMulticastFrames ",
	"RxUnicastFrames ",

	"RxFramesTooLong ",
	"RxJabberErrors ",
	"RxFCSErrors ",
	"RxLengthErrors ",
	"RxSymbolErrors ",
	"RxRuntFrames ",

	"RxFrames64 ",
	"RxFrames65To127 ",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",

	"RxPauseFrames ",
	"RxPPP0Frames ",
	"RxPPP1Frames ",
	"RxPPP2Frames ",
	"RxPPP3Frames ",
	"RxPPP4Frames ",
	"RxPPP5Frames ",
	"RxPPP6Frames ",
	"RxPPP7Frames ",

	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc ",
	"RxBG1FramesTrunc ",
	"RxBG2FramesTrunc ",
	"RxBG3FramesTrunc ",

	"TSO ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"VLANextractions ",
	"VLANinsertions ",
	"GROpackets ",
	"GROmerged ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T4_REGMAP_SIZE (160 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T4_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strcpy(info->driver, KBUILD_MODNAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));

	if (!adapter->params.fw_vers)
		strcpy(info->fw_version, "N/A");
	else
		snprintf(info->fw_version, sizeof(info->fw_version),
			"%u.%u.%u.%u, TP %u.%u.%u.%u",
			FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port. They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}

/*
 * Return a version number to identify the type of adapter. The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return 4 | (ap->params.rev << 10) | (1 << 16);
}
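
/* E.g., a rev-0 T4 with register dump version 1 gives
 * 4 | (0 << 10) | (1 << 16) = 0x10004.
 */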

static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}
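
/* Copies registers start..end (inclusive) into buf at byte offset start;
 * e.g. reg_block_dump(ap, buf, 0x1008, 0x1108) fills buf bytes
 * 0x1008..0x110b.
 */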

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};

	int i;
	struct adapter *ap = netdev2adap(dev);

	regs->version = mk_adap_vers(ap);

	memset(buf, 0, T4_REGMAP_SIZE);
	for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
	return 0;
}

static int identify_port(struct net_device *dev, u32 data)
{
	struct adapter *adap = netdev2adap(dev);

	if (data == 0)
		data = 2;	/* default to 2 seconds */

	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid,
				data * 5);
}

static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
	unsigned int v = 0;

	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
	    type == FW_PORT_TYPE_BT_XAUI) {
		v |= SUPPORTED_TP;
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	else if (type == FW_PORT_TYPE_BP_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC;
	else if (type == FW_PORT_TYPE_FIBER_XFI ||
		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
		v |= SUPPORTED_FIBRE;

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	return v;
}

static unsigned int to_fw_linkcaps(unsigned int caps)
{
	unsigned int v = 0;

	if (caps & ADVERTISED_100baseT_Full)
		v |= FW_PORT_CAP_SPEED_100M;
	if (caps & ADVERTISED_1000baseT_Full)
		v |= FW_PORT_CAP_SPEED_1G;
	if (caps & ADVERTISED_10000baseT_Full)
		v |= FW_PORT_CAP_SPEED_10G;
	return v;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP) {
		if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
		    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_FIBRE;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;	/* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static unsigned int speed_to_caps(int speed)
{
	if (speed == SPEED_100)
		return FW_PORT_CAP_SPEED_100M;
	if (speed == SPEED_1000)
		return FW_PORT_CAP_SPEED_1G;
	if (speed == SPEED_10000)
		return FW_PORT_CAP_SPEED_10G;
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (cmd->duplex != DUPLEX_FULL)	/* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed. See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(cmd->speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(cmd->speed);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
		    cmd->speed == SPEED_10000)
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & FW_PORT_CAP_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_offload & RX_CSO;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	if (data)
		p->rx_offload |= RX_CSO;
	else
		p->rx_offload &= ~RX_CSO;
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

/*
 * Return a queue's interrupt hold-off time in us. 0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
			       const struct sge_rspq *q)
{
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
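
/*
 * intr_params packs QINTR_CNT_EN into bit 0 and the hold-off timer index
 * into the bits above it (see set_rxq_intr_params() below), which is why
 * qtimer_val() shifts right by one.
 */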

/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adap: the adapter
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count. At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
					    &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
	return 0;
}
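
/*
 * E.g., set_rxq_intr_params(adap, q, 5, 8) selects the configured hold-off
 * timer closest to 5 us and the packet-count threshold closest to 8; with
 * us == 0 and cnt == 0 the count falls back to 1 so the queue still
 * generates interrupts.
 */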

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
			c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

	c->rx_coalesce_usecs = qtimer_val(adap, rq);
	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
		adap->sge.counter_val[rq->pktcnt_idx] : 0;
	return 0;
}

/*
 * Translate a physical EEPROM address to virtual. The first 1K is accessed
 * through virtual addresses starting at 31K, the rest is accessed through
 * virtual addresses starting at 0. This mapping is correct only for PF0.
 */
static int eeprom_ptov(unsigned int phys_addr)
{
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024;
	return -EINVAL;
}
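
/* E.g., for PF0: physical address 0x0 maps to virtual 0x7c00 (31K) and
 * physical 0x400 maps to virtual 0x0.
 */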
1687
1688/*
1689 * The next two routines implement eeprom read/write from physical addresses.
1690 * The physical->virtual translation is correct only for PF0.
1691 */
1692static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1693{
1694 int vaddr = eeprom_ptov(phys_addr);
1695
1696 if (vaddr >= 0)
1697 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1698 return vaddr < 0 ? vaddr : 0;
1699}
1700
1701static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1702{
1703 int vaddr = eeprom_ptov(phys_addr);
1704
1705 if (vaddr >= 0)
1706 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1707 return vaddr < 0 ? vaddr : 0;
1708}
1709
1710#define EEPROM_MAGIC 0x38E2F10C
1711
1712static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1713 u8 *data)
1714{
1715 int i, err = 0;
1716 struct adapter *adapter = netdev2adap(dev);
1717
1718 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1719 if (!buf)
1720 return -ENOMEM;
1721
1722 e->magic = EEPROM_MAGIC;
1723 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1724 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1725
1726 if (!err)
1727 memcpy(data, buf + e->offset, e->len);
1728 kfree(buf);
1729 return err;
1730}
1731
1732static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1733 u8 *data)
1734{
1735 u8 *buf;
1736 int err = 0;
1737 u32 aligned_offset, aligned_len, *p;
1738 struct adapter *adapter = netdev2adap(dev);
1739
1740 if (eeprom->magic != EEPROM_MAGIC)
1741 return -EINVAL;
1742
1743 aligned_offset = eeprom->offset & ~3;
1744 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1745
1746 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1747 /*
1748 * RMW possibly needed for first or last words.
1749 */
1750 buf = kmalloc(aligned_len, GFP_KERNEL);
1751 if (!buf)
1752 return -ENOMEM;
1753 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1754 if (!err && aligned_len > 4)
1755 err = eeprom_rd_phys(adapter,
1756 aligned_offset + aligned_len - 4,
1757 (u32 *)&buf[aligned_len - 4]);
1758 if (err)
1759 goto out;
1760 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1761 } else
1762 buf = data;
1763
1764 err = t4_seeprom_wp(adapter, false);
1765 if (err)
1766 goto out;
1767
1768 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1769 err = eeprom_wr_phys(adapter, aligned_offset, *p);
1770 aligned_offset += 4;
1771 }
1772
1773 if (!err)
1774 err = t4_seeprom_wp(adapter, true);
1775out:
1776 if (buf != data)
1777 kfree(buf);
1778 return err;
1779}
1780
1781static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1782{
1783 int ret;
1784 const struct firmware *fw;
1785 struct adapter *adap = netdev2adap(netdev);
1786
1787 ef->data[sizeof(ef->data) - 1] = '\0';
1788 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1789 if (ret < 0)
1790 return ret;
1791
1792 ret = t4_load_fw(adap, fw->data, fw->size);
1793 release_firmware(fw);
1794 if (!ret)
1795 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
1796 return ret;
1797}
1798
1799#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1800#define BCAST_CRC 0xa0ccc1a6
1801
1802static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1803{
1804 wol->supported = WAKE_BCAST | WAKE_MAGIC;
1805 wol->wolopts = netdev2adap(dev)->wol;
1806 memset(&wol->sopass, 0, sizeof(wol->sopass));
1807}
1808
1809static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1810{
1811 int err = 0;
1812 struct port_info *pi = netdev_priv(dev);
1813
1814 if (wol->wolopts & ~WOL_SUPPORTED)
1815 return -EINVAL;
1816 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
1817 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
1818 if (wol->wolopts & WAKE_BCAST) {
1819 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
1820 ~0ULL, 0, false);
1821 if (!err)
1822 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
1823 ~6ULL, ~0ULL, BCAST_CRC, true);
1824 } else
1825 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
1826 return err;
1827}
1828
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)

static int set_tso(struct net_device *dev, u32 value)
{
	if (value)
		dev->features |= TSO_FLAGS;
	else
		dev->features &= ~TSO_FLAGS;
	return 0;
}

static int set_flags(struct net_device *dev, u32 flags)
{
	return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
}

static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
1846{
1847 const struct port_info *pi = netdev_priv(dev);
1848 unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
1849
1850 p->size = pi->rss_size;
1851 while (n--)
1852 p->ring_index[n] = pi->rss[n];
1853 return 0;
1854}
1855
1856static int set_rss_table(struct net_device *dev,
1857 const struct ethtool_rxfh_indir *p)
1858{
1859 unsigned int i;
1860 struct port_info *pi = netdev_priv(dev);
1861
1862 if (p->size != pi->rss_size)
1863 return -EINVAL;
1864 for (i = 0; i < p->size; i++)
1865 if (p->ring_index[i] >= pi->nqsets)
1866 return -EINVAL;
1867 for (i = 0; i < p->size; i++)
1868 pi->rss[i] = p->ring_index[i];
1869 if (pi->adapter->flags & FULL_INIT_DONE)
1870 return write_rss(pi, pi->rss);
1871 return 0;
1872}
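
/*
 * Editor's note with an example (values illustrative): set_rss_table()
 * requires a full-sized table whose every slot targets one of the port's
 * own queue sets.  With pi->rss_size == 128 and pi->nqsets == 4, a valid
 * table maps each of the 128 slots to a queue index 0..3, e.g. the
 * round-robin pattern slot i -> i % 4 that init_rss() installs by default.
 */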
1873
1874static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1875 void *rules)
1876{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
1881 unsigned int v = pi->rss_mode;
1882
1883 info->data = 0;
1884 switch (info->flow_type) {
1885 case TCP_V4_FLOW:
1886 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
1887 info->data = RXH_IP_SRC | RXH_IP_DST |
1888 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1889 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1890 info->data = RXH_IP_SRC | RXH_IP_DST;
1891 break;
1892 case UDP_V4_FLOW:
1893 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
1894 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1895 info->data = RXH_IP_SRC | RXH_IP_DST |
1896 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1897 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1898 info->data = RXH_IP_SRC | RXH_IP_DST;
1899 break;
1900 case SCTP_V4_FLOW:
1901 case AH_ESP_V4_FLOW:
1902 case IPV4_FLOW:
1903 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1904 info->data = RXH_IP_SRC | RXH_IP_DST;
1905 break;
1906 case TCP_V6_FLOW:
1907 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
1908 info->data = RXH_IP_SRC | RXH_IP_DST |
1909 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1910 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1911 info->data = RXH_IP_SRC | RXH_IP_DST;
1912 break;
1913 case UDP_V6_FLOW:
1914 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
1915 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1916 info->data = RXH_IP_SRC | RXH_IP_DST |
1917 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1918 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1919 info->data = RXH_IP_SRC | RXH_IP_DST;
1920 break;
1921 case SCTP_V6_FLOW:
1922 case AH_ESP_V6_FLOW:
1923 case IPV6_FLOW:
1924 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1925 info->data = RXH_IP_SRC | RXH_IP_DST;
1926 break;
1927 }
1928 return 0;
1929 }
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
1933 }
1934 return -EOPNOTSUPP;
1935}
1936
static struct ethtool_ops cxgb_ethtool_ops = {

1938 .get_settings = get_settings,
1939 .set_settings = set_settings,
1940 .get_drvinfo = get_drvinfo,
1941 .get_msglevel = get_msglevel,
1942 .set_msglevel = set_msglevel,
1943 .get_ringparam = get_sge_param,
1944 .set_ringparam = set_sge_param,
1945 .get_coalesce = get_coalesce,
1946 .set_coalesce = set_coalesce,
1947 .get_eeprom_len = get_eeprom_len,
1948 .get_eeprom = get_eeprom,
1949 .set_eeprom = set_eeprom,
1950 .get_pauseparam = get_pauseparam,
1951 .set_pauseparam = set_pauseparam,
1952 .get_rx_csum = get_rx_csum,
1953 .set_rx_csum = set_rx_csum,
1954 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
1955 .set_sg = ethtool_op_set_sg,
1956 .get_link = ethtool_op_get_link,
1957 .get_strings = get_strings,
1958 .phys_id = identify_port,
1959 .nway_reset = restart_autoneg,
1960 .get_sset_count = get_sset_count,
1961 .get_ethtool_stats = get_stats,
1962 .get_regs_len = get_regs_len,
1963 .get_regs = get_regs,
1964 .get_wol = get_wol,
1965 .set_wol = set_wol,
1966 .set_tso = set_tso,
	.set_flags = set_flags,
	.get_rxnfc = get_rxnfc,
	.get_rxfh_indir = get_rss_table,
	.set_rxfh_indir = set_rss_table,
	.flash_device = set_flash,
1972};
1973
1974/*
1975 * debugfs support
1976 */
1977
1978static int mem_open(struct inode *inode, struct file *file)
1979{
1980 file->private_data = inode->i_private;
1981 return 0;
1982}
1983
1984static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
1985 loff_t *ppos)
1986{
1987 loff_t pos = *ppos;
1988 loff_t avail = file->f_path.dentry->d_inode->i_size;
1989 unsigned int mem = (uintptr_t)file->private_data & 3;
1990 struct adapter *adap = file->private_data - mem;
1991
1992 if (pos < 0)
1993 return -EINVAL;
1994 if (pos >= avail)
1995 return 0;
1996 if (count > avail - pos)
1997 count = avail - pos;
1998
1999 while (count) {
2000 size_t len;
2001 int ret, ofst;
2002 __be32 data[16];
2003
2004 if (mem == MEM_MC)
2005 ret = t4_mc_read(adap, pos, data, NULL);
2006 else
2007 ret = t4_edc_read(adap, mem, pos, data, NULL);
2008 if (ret)
2009 return ret;
2010
2011 ofst = pos % sizeof(data);
2012 len = min(count, sizeof(data) - ofst);
2013 if (copy_to_user(buf, (u8 *)data + ofst, len))
2014 return -EFAULT;
2015
2016 buf += len;
2017 pos += len;
2018 count -= len;
2019 }
2020 count = pos - *ppos;
2021 *ppos = pos;
2022 return count;
2023}
2024
2025static const struct file_operations mem_debugfs_fops = {
2026 .owner = THIS_MODULE,
2027 .open = mem_open,
2028 .read = mem_read,
2029};
2030
2031static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
2032 unsigned int idx, unsigned int size_mb)
2033{
2034 struct dentry *de;
2035
2036 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2037 (void *)adap + idx, &mem_debugfs_fops);
2038 if (de && de->d_inode)
2039 de->d_inode->i_size = size_mb << 20;
2040}
2041
2042static int __devinit setup_debugfs(struct adapter *adap)
2043{
2044 int i;
2045
2046 if (IS_ERR_OR_NULL(adap->debugfs_root))
2047 return -1;
2048
2049 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2050 if (i & EDRAM0_ENABLE)
2051 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2052 if (i & EDRAM1_ENABLE)
2053 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2054 if (i & EXT_MEM_ENABLE)
2055 add_debugfs_mem(adap, "mc", MEM_MC,
2056 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2057 if (adap->l2t)
2058 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2059 &t4_l2t_fops);
2060 return 0;
2061}
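
/*
 * Editor's usage sketch (not part of the driver): the nodes created above
 * can be read like ordinary sparse files once debugfs is mounted.  The
 * path and offset below are illustrative only; the adapter directory name
 * depends on how adap->debugfs_root was created elsewhere in the driver.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	char buf[64];
 *	int fd = open("/sys/kernel/debug/cxgb4/<dev>/edc0", O_RDONLY);
 *	if (fd >= 0) {
 *		pread(fd, buf, sizeof(buf), 0x1000);	// 64 bytes at 4KB
 *		close(fd);
 *	}
 */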
2062
2063/*
2064 * upper-layer driver support
2065 */
2066
/*
 * Allocate an active-open TID and bind it to the supplied caller data.
 */
2070int cxgb4_alloc_atid(struct tid_info *t, void *data)
2071{
2072 int atid = -1;
2073
2074 spin_lock_bh(&t->atid_lock);
2075 if (t->afree) {
2076 union aopen_entry *p = t->afree;
2077
2078 atid = p - t->atid_tab;
2079 t->afree = p->next;
2080 p->data = data;
2081 t->atids_in_use++;
2082 }
2083 spin_unlock_bh(&t->atid_lock);
2084 return atid;
2085}
2086EXPORT_SYMBOL(cxgb4_alloc_atid);
2087
2088/*
2089 * Release an active-open TID.
2090 */
2091void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2092{
2093 union aopen_entry *p = &t->atid_tab[atid];
2094
2095 spin_lock_bh(&t->atid_lock);
2096 p->next = t->afree;
2097 t->afree = p;
2098 t->atids_in_use--;
2099 spin_unlock_bh(&t->atid_lock);
2100}
2101EXPORT_SYMBOL(cxgb4_free_atid);
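
/*
 * Editor's usage sketch (not part of the driver): how an offload driver
 * might pair cxgb4_alloc_atid()/cxgb4_free_atid().  "my_conn" and the
 * example_* helpers are hypothetical; only the two TID calls are real.
 */
static inline int example_open_active_conn(struct tid_info *t, void *my_conn)
{
	int atid = cxgb4_alloc_atid(t, my_conn);	/* stash caller data */

	if (atid < 0)
		return -ENOMEM;		/* atid table exhausted */
	/* ... build and send the active-open request using atid ... */
	return atid;
}

static inline void example_close_active_conn(struct tid_info *t, int atid)
{
	cxgb4_free_atid(t, atid);	/* return the entry to the free list */
}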
2102
/*
 * Allocate a server TID and bind it to the supplied caller data.
 */
2106int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2107{
2108 int stid;
2109
2110 spin_lock_bh(&t->stid_lock);
2111 if (family == PF_INET) {
2112 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2113 if (stid < t->nstids)
2114 __set_bit(stid, t->stid_bmap);
2115 else
2116 stid = -1;
2117 } else {
2118 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2119 if (stid < 0)
2120 stid = -1;
2121 }
2122 if (stid >= 0) {
2123 t->stid_tab[stid].data = data;
2124 stid += t->stid_base;
2125 t->stids_in_use++;
2126 }
2127 spin_unlock_bh(&t->stid_lock);
2128 return stid;
2129}
2130EXPORT_SYMBOL(cxgb4_alloc_stid);
2131
2132/*
2133 * Release a server TID.
2134 */
2135void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2136{
2137 stid -= t->stid_base;
2138 spin_lock_bh(&t->stid_lock);
2139 if (family == PF_INET)
2140 __clear_bit(stid, t->stid_bmap);
2141 else
2142 bitmap_release_region(t->stid_bmap, stid, 2);
2143 t->stid_tab[stid].data = NULL;
2144 t->stids_in_use--;
2145 spin_unlock_bh(&t->stid_lock);
2146}
2147EXPORT_SYMBOL(cxgb4_free_stid);
2148
2149/*
2150 * Populate a TID_RELEASE WR. Caller must properly size the skb.
2151 */
2152static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2153 unsigned int tid)
2154{
2155 struct cpl_tid_release *req;
2156
2157 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
2158 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
2159 INIT_TP_WR(req, tid);
2160 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
2161}
2162
2163/*
2164 * Queue a TID release request and if necessary schedule a work queue to
2165 * process it.
2166 */
2167void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2168 unsigned int tid)
2169{
2170 void **p = &t->tid_tab[tid];
2171 struct adapter *adap = container_of(t, struct adapter, tids);
2172
2173 spin_lock_bh(&adap->tid_release_lock);
2174 *p = adap->tid_release_head;
2175 /* Low 2 bits encode the Tx channel number */
2176 adap->tid_release_head = (void **)((uintptr_t)p | chan);
2177 if (!adap->tid_release_task_busy) {
2178 adap->tid_release_task_busy = true;
2179 schedule_work(&adap->tid_release_task);
2180 }
2181 spin_unlock_bh(&adap->tid_release_lock);
2182}
2183EXPORT_SYMBOL(cxgb4_queue_tid_release);
2184
2185/*
2186 * Process the list of pending TID release requests.
2187 */
2188static void process_tid_release_list(struct work_struct *work)
2189{
2190 struct sk_buff *skb;
2191 struct adapter *adap;
2192
2193 adap = container_of(work, struct adapter, tid_release_task);
2194
2195 spin_lock_bh(&adap->tid_release_lock);
2196 while (adap->tid_release_head) {
2197 void **p = adap->tid_release_head;
2198 unsigned int chan = (uintptr_t)p & 3;
2199 p = (void *)p - chan;
2200
2201 adap->tid_release_head = *p;
2202 *p = NULL;
2203 spin_unlock_bh(&adap->tid_release_lock);
2204
2205 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2206 GFP_KERNEL)))
2207 schedule_timeout_uninterruptible(1);
2208
2209 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2210 t4_ofld_send(adap, skb);
2211 spin_lock_bh(&adap->tid_release_lock);
2212 }
2213 adap->tid_release_task_busy = false;
2214 spin_unlock_bh(&adap->tid_release_lock);
2215}
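
/*
 * Editor's note with a worked example: the deferred-release list above is
 * threaded through the tid_tab slots themselves, and because those slots
 * are pointer-aligned the low two bits of each link are free to carry the
 * Tx channel.  E.g. for &tid_tab[tid] == 0x...940 and chan == 2, the head
 * stored is 0x...942; the consumer recovers chan = (uintptr_t)p & 3 and
 * the slot address as p - chan, exactly as done in the loop above.
 */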
2216
2217/*
2218 * Release a TID and inform HW. If we are unable to allocate the release
2219 * message we defer to a work queue.
2220 */
2221void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2222{
2223 void *old;
2224 struct sk_buff *skb;
2225 struct adapter *adap = container_of(t, struct adapter, tids);
2226
2227 old = t->tid_tab[tid];
2228 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2229 if (likely(skb)) {
2230 t->tid_tab[tid] = NULL;
2231 mk_tid_release(skb, chan, tid);
2232 t4_ofld_send(adap, skb);
2233 } else
2234 cxgb4_queue_tid_release(t, chan, tid);
2235 if (old)
2236 atomic_dec(&t->tids_in_use);
2237}
2238EXPORT_SYMBOL(cxgb4_remove_tid);
2239
2240/*
2241 * Allocate and initialize the TID tables. Returns 0 on success.
2242 */
2243static int tid_init(struct tid_info *t)
2244{
2245 size_t size;
2246 unsigned int natids = t->natids;
2247
2248 size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
2249 t->nstids * sizeof(*t->stid_tab) +
2250 BITS_TO_LONGS(t->nstids) * sizeof(long);
2251 t->tid_tab = t4_alloc_mem(size);
2252 if (!t->tid_tab)
2253 return -ENOMEM;
2254
2255 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2256 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2257 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
2258 spin_lock_init(&t->stid_lock);
2259 spin_lock_init(&t->atid_lock);
2260
2261 t->stids_in_use = 0;
2262 t->afree = NULL;
2263 t->atids_in_use = 0;
2264 atomic_set(&t->tids_in_use, 0);
2265
2266 /* Setup the free list for atid_tab and clear the stid bitmap. */
2267 if (natids) {
2268 while (--natids)
2269 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2270 t->afree = t->atid_tab;
2271 }
2272 bitmap_zero(t->stid_bmap, t->nstids);
2273 return 0;
2274}
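
/*
 * Editor's note (sizes illustrative, 64-bit host assumed): tid_init()
 * carves one contiguous allocation into four regions.  With ntids = 8,
 * natids = 4 and nstids = 4 the layout is
 *
 *	tid_tab    8 * 8 bytes                  at offset 0
 *	atid_tab   4 * sizeof(union aopen_entry) following
 *	stid_tab   4 * sizeof(struct serv_entry) following
 *	stid_bmap  BITS_TO_LONGS(4) * 8 bytes   at the end
 *
 * which is exactly the sum computed for t4_alloc_mem() above.
 */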
2275
2276/**
2277 * cxgb4_create_server - create an IP server
2278 * @dev: the device
2279 * @stid: the server TID
2280 * @sip: local IP address to bind server to
2281 * @sport: the server's TCP port
2282 * @queue: queue to direct messages from this server to
2283 *
2284 * Create an IP server for the given port and address.
2285 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2286 */
2287int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2288 __be32 sip, __be16 sport, unsigned int queue)
2289{
2290 unsigned int chan;
2291 struct sk_buff *skb;
2292 struct adapter *adap;
2293 struct cpl_pass_open_req *req;
2294
2295 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2296 if (!skb)
2297 return -ENOMEM;
2298
2299 adap = netdev2adap(dev);
2300 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2301 INIT_TP_WR(req, 0);
2302 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2303 req->local_port = sport;
2304 req->peer_port = htons(0);
2305 req->local_ip = sip;
2306 req->peer_ip = htonl(0);
2307 chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
2308 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2309 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2310 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2311 return t4_mgmt_tx(adap, skb);
2312}
2313EXPORT_SYMBOL(cxgb4_create_server);
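
/*
 * Editor's usage sketch (not part of the driver): a listening offload
 * service would typically allocate a server TID and then ask the HW to
 * demux matching SYNs to a chosen ingress queue.  The port number and
 * queue index below are illustrative only.
 */
static inline int example_start_listen(struct net_device *dev,
				       struct tid_info *t, void *ctx)
{
	int stid = cxgb4_alloc_stid(t, PF_INET, ctx);

	if (stid < 0)
		return -ENOMEM;
	/* bind to 0.0.0.0:8000, steering CPLs to ingress queue 0 */
	return cxgb4_create_server(dev, stid, htonl(0), htons(8000), 0);
}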
2314
2315/**
2316 * cxgb4_create_server6 - create an IPv6 server
2317 * @dev: the device
2318 * @stid: the server TID
2319 * @sip: local IPv6 address to bind server to
2320 * @sport: the server's TCP port
2321 * @queue: queue to direct messages from this server to
2322 *
2323 * Create an IPv6 server for the given port and address.
2324 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2325 */
2326int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
2327 const struct in6_addr *sip, __be16 sport,
2328 unsigned int queue)
2329{
2330 unsigned int chan;
2331 struct sk_buff *skb;
2332 struct adapter *adap;
2333 struct cpl_pass_open_req6 *req;
2334
2335 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2336 if (!skb)
2337 return -ENOMEM;
2338
2339 adap = netdev2adap(dev);
2340 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
2341 INIT_TP_WR(req, 0);
2342 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
2343 req->local_port = sport;
2344 req->peer_port = htons(0);
2345 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
2346 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
2347 req->peer_ip_hi = cpu_to_be64(0);
2348 req->peer_ip_lo = cpu_to_be64(0);
2349 chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
2350 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2351 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2352 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2353 return t4_mgmt_tx(adap, skb);
2354}
2355EXPORT_SYMBOL(cxgb4_create_server6);
2356
2357/**
2358 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2359 * @mtus: the HW MTU table
2360 * @mtu: the target MTU
2361 * @idx: index of selected entry in the MTU table
2362 *
2363 * Returns the index and the value in the HW MTU table that is closest to
2364 * but does not exceed @mtu, unless @mtu is smaller than any value in the
2365 * table, in which case that smallest available value is selected.
2366 */
2367unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2368 unsigned int *idx)
2369{
2370 unsigned int i = 0;
2371
2372 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2373 ++i;
2374 if (idx)
2375 *idx = i;
2376 return mtus[i];
2377}
2378EXPORT_SYMBOL(cxgb4_best_mtu);
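
/*
 * Editor's worked example (table values illustrative): with an MTU table
 * starting { 88, 256, 512, 576, 808, 1024, 1280, 1462, 1500, ... },
 * cxgb4_best_mtu(mtus, 1400, &idx) returns 1280 with idx == 6 -- the
 * largest entry not exceeding 1400 -- while any request below 88 selects
 * the smallest available value, mtus[0].
 */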
2379
2380/**
2381 * cxgb4_port_chan - get the HW channel of a port
2382 * @dev: the net device for the port
2383 *
2384 * Return the HW Tx channel of the given port.
2385 */
2386unsigned int cxgb4_port_chan(const struct net_device *dev)
2387{
2388 return netdev2pinfo(dev)->tx_chan;
2389}
2390EXPORT_SYMBOL(cxgb4_port_chan);
2391
2392/**
2393 * cxgb4_port_viid - get the VI id of a port
2394 * @dev: the net device for the port
2395 *
2396 * Return the VI id of the given port.
2397 */
2398unsigned int cxgb4_port_viid(const struct net_device *dev)
2399{
2400 return netdev2pinfo(dev)->viid;
2401}
2402EXPORT_SYMBOL(cxgb4_port_viid);
2403
2404/**
2405 * cxgb4_port_idx - get the index of a port
2406 * @dev: the net device for the port
2407 *
2408 * Return the index of the given port.
2409 */
2410unsigned int cxgb4_port_idx(const struct net_device *dev)
2411{
2412 return netdev2pinfo(dev)->port_id;
2413}
2414EXPORT_SYMBOL(cxgb4_port_idx);
2415
2416/**
2417 * cxgb4_netdev_by_hwid - return the net device of a HW port
2418 * @pdev: identifies the adapter
2419 * @id: the HW port id
2420 *
2421 * Return the net device associated with the interface with the given HW
2422 * id.
2423 */
2424struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
2425{
2426 const struct adapter *adap = pci_get_drvdata(pdev);
2427
2428 if (!adap || id >= NCHAN)
2429 return NULL;
2430 id = adap->chan_map[id];
2431 return id < MAX_NPORTS ? adap->port[id] : NULL;
2432}
2433EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
2434
2435void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2436 struct tp_tcp_stats *v6)
2437{
2438 struct adapter *adap = pci_get_drvdata(pdev);
2439
2440 spin_lock(&adap->stats_lock);
2441 t4_tp_get_tcp_stats(adap, v4, v6);
2442 spin_unlock(&adap->stats_lock);
2443}
2444EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2445
2446void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2447 const unsigned int *pgsz_order)
2448{
2449 struct adapter *adap = netdev2adap(dev);
2450
2451 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2452 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2453 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2454 HPZ3(pgsz_order[3]));
2455}
2456EXPORT_SYMBOL(cxgb4_iscsi_init);
2457
2458static struct pci_driver cxgb4_driver;
2459
2460static void check_neigh_update(struct neighbour *neigh)
2461{
2462 const struct device *parent;
2463 const struct net_device *netdev = neigh->dev;
2464
2465 if (netdev->priv_flags & IFF_802_1Q_VLAN)
2466 netdev = vlan_dev_real_dev(netdev);
2467 parent = netdev->dev.parent;
2468 if (parent && parent->driver == &cxgb4_driver.driver)
2469 t4_l2t_update(dev_get_drvdata(parent), neigh);
2470}
2471
2472static int netevent_cb(struct notifier_block *nb, unsigned long event,
2473 void *data)
2474{
2475 switch (event) {
2476 case NETEVENT_NEIGH_UPDATE:
2477 check_neigh_update(data);
2478 break;
2479 case NETEVENT_PMTU_UPDATE:
2480 case NETEVENT_REDIRECT:
2481 default:
2482 break;
2483 }
2484 return 0;
2485}
2486
2487static bool netevent_registered;
2488static struct notifier_block cxgb4_netevent_nb = {
2489 .notifier_call = netevent_cb
2490};
2491
2492static void uld_attach(struct adapter *adap, unsigned int uld)
2493{
2494 void *handle;
2495 struct cxgb4_lld_info lli;
2496
2497 lli.pdev = adap->pdev;
2498 lli.l2t = adap->l2t;
2499 lli.tids = &adap->tids;
2500 lli.ports = adap->port;
2501 lli.vr = &adap->vres;
2502 lli.mtus = adap->params.mtus;
2503 if (uld == CXGB4_ULD_RDMA) {
2504 lli.rxq_ids = adap->sge.rdma_rxq;
2505 lli.nrxq = adap->sge.rdmaqs;
2506 } else if (uld == CXGB4_ULD_ISCSI) {
2507 lli.rxq_ids = adap->sge.ofld_rxq;
2508 lli.nrxq = adap->sge.ofldqsets;
2509 }
2510 lli.ntxq = adap->sge.ofldqsets;
2511 lli.nchan = adap->params.nports;
2512 lli.nports = adap->params.nports;
2513 lli.wr_cred = adap->params.ofldq_wr_cred;
2514 lli.adapter_type = adap->params.rev;
2515 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2523 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2524 lli.fw_vers = adap->params.fw_vers;
2525
2526 handle = ulds[uld].add(&lli);
2527 if (IS_ERR(handle)) {
2528 dev_warn(adap->pdev_dev,
2529 "could not attach to the %s driver, error %ld\n",
2530 uld_str[uld], PTR_ERR(handle));
2531 return;
2532 }
2533
2534 adap->uld_handle[uld] = handle;
2535
2536 if (!netevent_registered) {
2537 register_netevent_notifier(&cxgb4_netevent_nb);
2538 netevent_registered = true;
2539 }

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}
2544
2545static void attach_ulds(struct adapter *adap)
2546{
2547 unsigned int i;
2548
2549 mutex_lock(&uld_mutex);
2550 list_add_tail(&adap->list_node, &adapter_list);
2551 for (i = 0; i < CXGB4_ULD_MAX; i++)
2552 if (ulds[i].add)
2553 uld_attach(adap, i);
2554 mutex_unlock(&uld_mutex);
2555}
2556
2557static void detach_ulds(struct adapter *adap)
2558{
2559 unsigned int i;
2560
2561 mutex_lock(&uld_mutex);
2562 list_del(&adap->list_node);
2563 for (i = 0; i < CXGB4_ULD_MAX; i++)
2564 if (adap->uld_handle[i]) {
2565 ulds[i].state_change(adap->uld_handle[i],
2566 CXGB4_STATE_DETACH);
2567 adap->uld_handle[i] = NULL;
2568 }
2569 if (netevent_registered && list_empty(&adapter_list)) {
2570 unregister_netevent_notifier(&cxgb4_netevent_nb);
2571 netevent_registered = false;
2572 }
2573 mutex_unlock(&uld_mutex);
2574}
2575
2576static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2577{
2578 unsigned int i;
2579
2580 mutex_lock(&uld_mutex);
2581 for (i = 0; i < CXGB4_ULD_MAX; i++)
2582 if (adap->uld_handle[i])
2583 ulds[i].state_change(adap->uld_handle[i], new_state);
2584 mutex_unlock(&uld_mutex);
2585}
2586
2587/**
2588 * cxgb4_register_uld - register an upper-layer driver
2589 * @type: the ULD type
2590 * @p: the ULD methods
2591 *
2592 * Registers an upper-layer driver with this driver and notifies the ULD
2593 * about any presently available devices that support its type. Returns
2594 * %-EBUSY if a ULD of the same type is already registered.
2595 */
2596int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2597{
2598 int ret = 0;
2599 struct adapter *adap;
2600
2601 if (type >= CXGB4_ULD_MAX)
2602 return -EINVAL;
2603 mutex_lock(&uld_mutex);
2604 if (ulds[type].add) {
2605 ret = -EBUSY;
2606 goto out;
2607 }
2608 ulds[type] = *p;
2609 list_for_each_entry(adap, &adapter_list, list_node)
2610 uld_attach(adap, type);
2611out: mutex_unlock(&uld_mutex);
2612 return ret;
2613}
2614EXPORT_SYMBOL(cxgb4_register_uld);
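
/*
 * Editor's usage sketch (not part of the driver): the shape of a minimal
 * ULD registration.  The example_* names are hypothetical, and the field
 * set is abbreviated -- a production ULD typically also supplies its Rx
 * handler; only the .add/.state_change callbacks exercised by uld_attach()
 * above and the register call itself are shown.
 */
static void *example_uld_add(const struct cxgb4_lld_info *lli)
{
	/* allocate per-adapter ULD state; lli describes queues and FW info */
	return (void *)lli;	/* placeholder handle for the sketch */
}

static int example_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	return 0;		/* react to UP/DETACH/RECOVERY events */
}

static const struct cxgb4_uld_info example_uld_info = {
	.add          = example_uld_add,
	.state_change = example_uld_state_change,
};

/* registration, e.g.: cxgb4_register_uld(CXGB4_ULD_ISCSI, &example_uld_info); */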
2615
2616/**
2617 * cxgb4_unregister_uld - unregister an upper-layer driver
2618 * @type: the ULD type
2619 *
2620 * Unregisters an existing upper-layer driver.
2621 */
2622int cxgb4_unregister_uld(enum cxgb4_uld type)
2623{
2624 struct adapter *adap;
2625
2626 if (type >= CXGB4_ULD_MAX)
2627 return -EINVAL;
2628 mutex_lock(&uld_mutex);
2629 list_for_each_entry(adap, &adapter_list, list_node)
2630 adap->uld_handle[type] = NULL;
2631 ulds[type].add = NULL;
2632 mutex_unlock(&uld_mutex);
2633 return 0;
2634}
2635EXPORT_SYMBOL(cxgb4_unregister_uld);
2636
2637/**
2638 * cxgb_up - enable the adapter
2639 * @adap: adapter being enabled
2640 *
2641 * Called when the first port is enabled, this function performs the
2642 * actions necessary to make an adapter operational, such as completing
2643 * the initialization of HW modules, and enabling interrupts.
2644 *
2645 * Must be called with the rtnl lock held.
2646 */
2647static int cxgb_up(struct adapter *adap)
2648{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}

static void cxgb_down(struct adapter *adapter)
{
	t4_intr_disable(adapter);
	cancel_work_sync(&adapter->tid_release_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	if (adapter->flags & USING_MSIX) {
		free_msix_queue_irqs(adapter);
		free_irq(adapter->msix_info[0].vec, adapter);
	} else
		free_irq(adapter->pdev->irq, adapter);
	quiesce_rx(adapter);
	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}
2708
2709/*
2710 * net_device operations
2711 */
2712static int cxgb_open(struct net_device *dev)
2713{
2714 int err;
2715 struct port_info *pi = netdev_priv(dev);
2716 struct adapter *adapter = pi->adapter;
2717
	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	dev->real_num_tx_queues = pi->nqsets;
	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}

static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

2748 spin_lock(&adapter->stats_lock);
2749 t4_get_port_stats(adapter, p->tx_chan, &stats);
2750 spin_unlock(&adapter->stats_lock);
2751
2752 ns->tx_bytes = stats.tx_octets;
2753 ns->tx_packets = stats.tx_frames;
2754 ns->rx_bytes = stats.rx_octets;
2755 ns->rx_packets = stats.rx_frames;
2756 ns->multicast = stats.rx_mcast_frames;
2757
2758 /* detailed rx_errors */
2759 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2760 stats.rx_runt;
2761 ns->rx_over_errors = 0;
2762 ns->rx_crc_errors = stats.rx_fcs_err;
2763 ns->rx_frame_errors = stats.rx_symbol_err;
2764 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
2765 stats.rx_ovflow2 + stats.rx_ovflow3 +
2766 stats.rx_trunc0 + stats.rx_trunc1 +
2767 stats.rx_trunc2 + stats.rx_trunc3;
2768 ns->rx_missed_errors = 0;
2769
2770 /* detailed tx_errors */
2771 ns->tx_aborted_errors = 0;
2772 ns->tx_carrier_errors = 0;
2773 ns->tx_fifo_errors = 0;
2774 ns->tx_heartbeat_errors = 0;
2775 ns->tx_window_errors = 0;
2776
2777 ns->tx_errors = stats.tx_error_frames;
2778 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2779 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2780 return ns;
2781}
2782
2783static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2784{
	unsigned int mbox;
	int ret = 0, prtad, devad;
2787 struct port_info *pi = netdev_priv(dev);
2788 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2789
2790 switch (cmd) {
2791 case SIOCGMIIPHY:
2792 if (pi->mdio_addr < 0)
2793 return -EOPNOTSUPP;
2794 data->phy_id = pi->mdio_addr;
2795 break;
2796 case SIOCGMIIREG:
2797 case SIOCSMIIREG:
2798 if (mdio_phy_id_is_c45(data->phy_id)) {
2799 prtad = mdio_phy_id_prtad(data->phy_id);
2800 devad = mdio_phy_id_devad(data->phy_id);
2801 } else if (data->phy_id < 32) {
2802 prtad = data->phy_id;
2803 devad = 0;
2804 data->reg_num &= 0x1f;
2805 } else
2806 return -EINVAL;
2807
		mbox = pi->adapter->fn;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
2815 break;
2816 default:
2817 return -EOPNOTSUPP;
2818 }
2819 return ret;
2820}
2821
2822static void cxgb_set_rxmode(struct net_device *dev)
2823{
2824 /* unfortunately we can't return errors to the stack */
2825 set_rxmode(dev, -1, false);
2826}
2827
2828static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2829{
2830 int ret;
2831 struct port_info *pi = netdev_priv(dev);
2832
2833 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
2834 return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
2838 dev->mtu = new_mtu;
2839 return ret;
2840}
2841
2842static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2843{
2844 int ret;
2845 struct sockaddr *addr = p;
2846 struct port_info *pi = netdev_priv(dev);
2847
2848 if (!is_valid_ether_addr(addr->sa_data))
2849 return -EINVAL;
2850
	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
2854 return ret;
2855
2856 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2857 pi->xact_addr_filt = ret;
2858 return 0;
2859}
2860
2861static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2862{
2863 struct port_info *pi = netdev_priv(dev);
2864
2865 pi->vlan_grp = grp;
	t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
		      grp != NULL, true);
}
2869
2870#ifdef CONFIG_NET_POLL_CONTROLLER
2871static void cxgb_netpoll(struct net_device *dev)
2872{
2873 struct port_info *pi = netdev_priv(dev);
2874 struct adapter *adap = pi->adapter;
2875
2876 if (adap->flags & USING_MSIX) {
2877 int i;
2878 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2879
2880 for (i = pi->nqsets; i; i--, rx++)
2881 t4_sge_intr_msix(0, &rx->rspq);
2882 } else
2883 t4_intr_handler(adap)(0, adap);
2884}
2885#endif
2886
2887static const struct net_device_ops cxgb4_netdev_ops = {
2888 .ndo_open = cxgb_open,
2889 .ndo_stop = cxgb_close,
2890 .ndo_start_xmit = t4_eth_xmit,
	.ndo_get_stats64 = cxgb_get_stats,
	.ndo_set_rx_mode = cxgb_set_rxmode,
2893 .ndo_set_mac_address = cxgb_set_mac_addr,
2894 .ndo_validate_addr = eth_validate_addr,
2895 .ndo_do_ioctl = cxgb_ioctl,
2896 .ndo_change_mtu = cxgb_change_mtu,
2897 .ndo_vlan_rx_register = vlan_rx_register,
2898#ifdef CONFIG_NET_POLL_CONTROLLER
2899 .ndo_poll_controller = cxgb_netpoll,
2900#endif
2901};
2902
2903void t4_fatal_err(struct adapter *adap)
2904{
2905 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
2906 t4_intr_disable(adap);
2907 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
2908}
2909
2910static void setup_memwin(struct adapter *adap)
2911{
2912 u32 bar0;
2913
2914 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
2915 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
2916 (bar0 + MEMWIN0_BASE) | BIR(0) |
2917 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
2918 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
2919 (bar0 + MEMWIN1_BASE) | BIR(0) |
2920 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
2921 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
2922 (bar0 + MEMWIN2_BASE) | BIR(0) |
2923 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
	if (adap->vres.ocq.size) {
		unsigned int start, sz_kb;

		start = pci_resource_start(adap->pdev, 2) +
			OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
	}
}
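
/*
 * Editor's worked example: the WINDOW() field encodes the aperture as a
 * power of two in kilobytes.  For a 64KB window, ilog2(65536) - 10 = 6,
 * i.e. 2^6 KB = 64KB, which is why the OCQ window above first rounds the
 * region up to a power of two and converts it to KB before taking ilog2.
 */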
2940
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST | FW_CMD_READ);
	c->retval_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	/* select capabilities we'll be using */
	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adap->pdev_dev, "virtualization ACLs not supported\n");
		return -EINVAL;
	}
	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->fn,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
	v = t4_read_reg(adap, TP_PIO_DATA);
	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);

	/* get basic stuff going */
	return t4_early_init(adap, adap->fn);
}
2995
/*
2997 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
2998 */
2999#define MAX_ATIDS 8192U
3000
3001/*
3002 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3003 */
3004static int adap_init0(struct adapter *adap)
3005{
3006 int ret;
3007 u32 v, port_vec;
3008 enum dev_state state;
3009 u32 params[7], val[7];
3010 struct fw_caps_config_cmd c;
3011
3012 ret = t4_check_fw_version(adap);
3013 if (ret == -EINVAL || ret > 0) {
3014 if (upgrade_fw(adap) >= 0) /* recache FW version */
3015 ret = t4_check_fw_version(adap);
3016 }
3017 if (ret < 0)
3018 return ret;
3019
3020 /* contact FW, request master */
	ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
	if (ret < 0) {
3023 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3024 ret);
3025 return ret;
3026 }
3027
3028 /* reset device */
	ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
	if (ret < 0)
3031 goto bye;
3032
	for (v = 0; v < SGE_NTIMERS - 1; v++)
3034 adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
3035 adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
3036 adap->sge.counter_val[0] = 1;
3037 for (v = 1; v < SGE_NCOUNTERS; v++)
3038 adap->sge.counter_val[v] = min(intr_cnt[v - 1],
3039 THRESHOLD_3_MASK);
#define FW_PARAM_DEV(param) \
3041 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3042 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3043
	params[0] = FW_PARAM_DEV(CCLK);
	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
	if (ret < 0)
3047 goto bye;
3048 adap->params.vpd.cclk = val[0];
3049
3050 ret = adap_init1(adap, &c);
3051 if (ret < 0)
3052 goto bye;
3053
#define FW_PARAM_PFVF(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
	 FW_PARAMS_PARAM_Y(adap->fn))

3059 params[0] = FW_PARAM_DEV(PORTVEC);
3060 params[1] = FW_PARAM_PFVF(L2T_START);
3061 params[2] = FW_PARAM_PFVF(L2T_END);
3062 params[3] = FW_PARAM_PFVF(FILTER_START);
3063 params[4] = FW_PARAM_PFVF(FILTER_END);
	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val);
	if (ret < 0)
3066 goto bye;
3067 port_vec = val[0];
3068 adap->tids.ftid_base = val[3];
3069 adap->tids.nftids = val[4] - val[3] + 1;
3070
3071 if (c.ofldcaps) {
3072 /* query offload-related parameters */
3073 params[0] = FW_PARAM_DEV(NTID);
3074 params[1] = FW_PARAM_PFVF(SERVER_START);
3075 params[2] = FW_PARAM_PFVF(SERVER_END);
3076 params[3] = FW_PARAM_PFVF(TDDP_START);
3077 params[4] = FW_PARAM_PFVF(TDDP_END);
3078 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
				      val);
		if (ret < 0)
3082 goto bye;
3083 adap->tids.ntids = val[0];
3084 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3085 adap->tids.stid_base = val[1];
3086 adap->tids.nstids = val[2] - val[1] + 1;
3087 adap->vres.ddp.start = val[3];
3088 adap->vres.ddp.size = val[4] - val[3] + 1;
3089 adap->params.ofldq_wr_cred = val[5];
3090 adap->params.offload = 1;
3091 }
3092 if (c.rdmacaps) {
3093 params[0] = FW_PARAM_PFVF(STAG_START);
3094 params[1] = FW_PARAM_PFVF(STAG_END);
3095 params[2] = FW_PARAM_PFVF(RQ_START);
3096 params[3] = FW_PARAM_PFVF(RQ_END);
3097 params[4] = FW_PARAM_PFVF(PBL_START);
3098 params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
				      val);
		if (ret < 0)
3102 goto bye;
3103 adap->vres.stag.start = val[0];
3104 adap->vres.stag.size = val[1] - val[0] + 1;
3105 adap->vres.rq.start = val[2];
3106 adap->vres.rq.size = val[3] - val[2] + 1;
3107 adap->vres.pbl.start = val[4];
3108 adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
				      val);
		if (ret < 0)
3119 goto bye;
3120 adap->vres.qp.start = val[0];
3121 adap->vres.qp.size = val[1] - val[0] + 1;
3122 adap->vres.cq.start = val[2];
3123 adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;
	}
3127 if (c.iscsicaps) {
3128 params[0] = FW_PARAM_PFVF(ISCSI_START);
3129 params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
				      val);
		if (ret < 0)
3133 goto bye;
3134 adap->vres.iscsi.start = val[0];
3135 adap->vres.iscsi.size = val[1] - val[0] + 1;
3136 }
3137#undef FW_PARAM_PFVF
3138#undef FW_PARAM_DEV
3139
3140 adap->params.nports = hweight32(port_vec);
3141 adap->params.portvec = port_vec;
3142 adap->flags |= FW_OK;
3143
3144 /* These are finalized by FW initialization, load their values now */
3145 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3146 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3147 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
3148 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3149 adap->params.b_wnd);

#ifdef CONFIG_PCI_IOV
	/*
3153 * Provision resource limits for Virtual Functions. We currently
3154 * grant them all the same static resource limits except for the Port
3155 * Access Rights Mask which we're assigning based on the PF. All of
3156 * the static provisioning stuff for both the PF and VF really needs
3157 * to be managed in a persistent manner for each device which the
3158 * firmware controls.
3159 */
3160 {
3161 int pf, vf;
3162
3163 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3164 if (num_vf[pf] <= 0)
3165 continue;
3166
3167 /* VF numbering starts at 1! */
3168 for (vf = 1; vf <= num_vf[pf]; vf++) {
			ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
					  VFRES_NEQ, VFRES_NETHCTRL,
3171 VFRES_NIQFLINT, VFRES_NIQ,
3172 VFRES_TC, VFRES_NVI,
3173 FW_PFVF_CMD_CMASK_MASK,
3174 pfvfres_pmask(adap, pf, vf),
3175 VFRES_NEXACTF,
3176 VFRES_R_CAPS, VFRES_WX_CAPS);
3177 if (ret < 0)
3178 dev_warn(adap->pdev_dev, "failed to "
3179 "provision pf/vf=%d/%d; "
3180 "err=%d\n", pf, vf, ret);
3181 }
3182 }
3183 }
3184#endif
3185
	setup_memwin(adap);
	return 0;
3188
	/*
	 * If a command timed out or failed with EIO, the firmware is either
	 * not operating within its spec or something catastrophic happened
	 * to the HW/FW, so stop issuing commands.
	 */
bye:	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->fn);
	return ret;
}
3198
/* EEH callbacks */
3200
3201static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
3202 pci_channel_state_t state)
3203{
3204 int i;
3205 struct adapter *adap = pci_get_drvdata(pdev);
3206
3207 if (!adap)
3208 goto out;
3209
3210 rtnl_lock();
3211 adap->flags &= ~FW_OK;
3212 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
3213 for_each_port(adap, i) {
3214 struct net_device *dev = adap->port[i];
3215
3216 netif_device_detach(dev);
3217 netif_carrier_off(dev);
3218 }
3219 if (adap->flags & FULL_INIT_DONE)
3220 cxgb_down(adap);
3221 rtnl_unlock();
3222 pci_disable_device(pdev);
3223out: return state == pci_channel_io_perm_failure ?
3224 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
3225}
3226
3227static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
3228{
3229 int i, ret;
3230 struct fw_caps_config_cmd c;
3231 struct adapter *adap = pci_get_drvdata(pdev);
3232
3233 if (!adap) {
3234 pci_restore_state(pdev);
3235 pci_save_state(pdev);
3236 return PCI_ERS_RESULT_RECOVERED;
3237 }
3238
3239 if (pci_enable_device(pdev)) {
3240 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
3241 return PCI_ERS_RESULT_DISCONNECT;
3242 }
3243
3244 pci_set_master(pdev);
3245 pci_restore_state(pdev);
3246 pci_save_state(pdev);
3247 pci_cleanup_aer_uncorrect_error_status(pdev);
3248
3249 if (t4_wait_dev_ready(adap) < 0)
3250 return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
		return PCI_ERS_RESULT_DISCONNECT;
3253 adap->flags |= FW_OK;
3254 if (adap_init1(adap, &c))
3255 return PCI_ERS_RESULT_DISCONNECT;
3256
3257 for_each_port(adap, i) {
3258 struct port_info *p = adap2pinfo(adap, i);
3259
		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
3263 return PCI_ERS_RESULT_DISCONNECT;
3264 p->viid = ret;
3265 p->xact_addr_filt = -1;
3266 }
3267
3268 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3269 adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
3272 return PCI_ERS_RESULT_DISCONNECT;
3273 return PCI_ERS_RESULT_RECOVERED;
3274}
3275
3276static void eeh_resume(struct pci_dev *pdev)
3277{
3278 int i;
3279 struct adapter *adap = pci_get_drvdata(pdev);
3280
3281 if (!adap)
3282 return;
3283
3284 rtnl_lock();
3285 for_each_port(adap, i) {
3286 struct net_device *dev = adap->port[i];
3287
3288 if (netif_running(dev)) {
3289 link_start(dev);
3290 cxgb_set_rxmode(dev);
3291 }
3292 netif_device_attach(dev);
3293 }
3294 rtnl_unlock();
3295}
3296
3297static struct pci_error_handlers cxgb4_eeh = {
3298 .error_detected = eeh_err_detected,
3299 .slot_reset = eeh_slot_reset,
3300 .resume = eeh_resume,
3301};
3302
static inline bool is_10g_port(const struct link_config *lc)
3304{
3305 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
3306}
3307
3308static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
3309 unsigned int size, unsigned int iqe_size)
3310{
3311 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
3312 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
3313 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
3314 q->iqe_len = iqe_size;
3315 q->size = size;
3316}
3317
3318/*
3319 * Perform default configuration of DMA queues depending on the number and type
3320 * of ports we found and the number of available CPUs. Most settings can be
3321 * modified by the admin prior to actual use.
3322 */
3323static void __devinit cfg_queues(struct adapter *adap)
3324{
3325 struct sge *s = &adap->sge;
3326 int i, q10g = 0, n10g = 0, qidx = 0;
3327
3328 for_each_port(adap, i)
3329 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
3330
	/*
	 * We default to one queue set per non-10G port and up to as many
	 * queue sets as there are CPU cores for each 10G port.
	 */
3335 if (n10g)
3336 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
3337 if (q10g > num_online_cpus())
3338 q10g = num_online_cpus();
3339
3340 for_each_port(adap, i) {
3341 struct port_info *pi = adap2pinfo(adap, i);
3342
3343 pi->first_qset = qidx;
3344 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
3345 qidx += pi->nqsets;
3346 }
3347
3348 s->ethqsets = qidx;
3349 s->max_ethqsets = qidx; /* MSI-X may lower it later */
3350
3351 if (is_offload(adap)) {
		/*
		 * For offload we use one queue per channel if all ports are
		 * 1G or slower; otherwise we divide the available queue sets
		 * evenly amongst the channels, capped by the number of
		 * online CPU cores.
		 */
3357 if (n10g) {
3358 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
3359 num_online_cpus());
3360 s->ofldqsets = roundup(i, adap->params.nports);
3361 } else
3362 s->ofldqsets = adap->params.nports;
3363 /* For RDMA one Rx queue per channel suffices */
3364 s->rdmaqs = adap->params.nports;
3365 }
3366
3367 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
3368 struct sge_eth_rxq *r = &s->ethrxq[i];
3369
3370 init_rspq(&r->rspq, 0, 0, 1024, 64);
3371 r->fl.size = 72;
3372 }
3373
3374 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
3375 s->ethtxq[i].q.size = 1024;
3376
3377 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
3378 s->ctrlq[i].q.size = 512;
3379
3380 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
3381 s->ofldtxq[i].q.size = 1024;
3382
3383 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
3384 struct sge_ofld_rxq *r = &s->ofldrxq[i];
3385
3386 init_rspq(&r->rspq, 0, 0, 1024, 64);
3387 r->rspq.uld = CXGB4_ULD_ISCSI;
3388 r->fl.size = 72;
3389 }
3390
3391 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
3392 struct sge_ofld_rxq *r = &s->rdmarxq[i];
3393
3394 init_rspq(&r->rspq, 0, 0, 511, 64);
3395 r->rspq.uld = CXGB4_ULD_RDMA;
3396 r->fl.size = 72;
3397 }
3398
3399 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
3400 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
3401}
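
/*
 * Editor's worked example (constants illustrative): on an adapter with two
 * 10G ports and two 1G ports, MAX_ETH_QSETS = 32 would give
 * q10g = (32 - 2) / 2 = 15, capped at num_online_cpus(); with 8 online
 * CPUs each 10G port gets 8 queue sets and each 1G port gets 1, for
 * s->ethqsets = 18.
 */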
3402
3403/*
3404 * Reduce the number of Ethernet queues across all ports to at most n.
3405 * n provides at least one queue per port.
3406 */
3407static void __devinit reduce_ethqs(struct adapter *adap, int n)
3408{
3409 int i;
3410 struct port_info *pi;
3411
3412 while (n < adap->sge.ethqsets)
3413 for_each_port(adap, i) {
3414 pi = adap2pinfo(adap, i);
3415 if (pi->nqsets > 1) {
3416 pi->nqsets--;
3417 adap->sge.ethqsets--;
3418 if (adap->sge.ethqsets <= n)
3419 break;
3420 }
3421 }
3422
3423 n = 0;
3424 for_each_port(adap, i) {
3425 pi = adap2pinfo(adap, i);
3426 pi->first_qset = n;
3427 n += pi->nqsets;
3428 }
3429}
3430
3431/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
3432#define EXTRA_VECS 2
3433
3434static int __devinit enable_msix(struct adapter *adap)
3435{
3436 int ofld_need = 0;
3437 int i, err, want, need;
3438 struct sge *s = &adap->sge;
3439 unsigned int nchan = adap->params.nports;
3440 struct msix_entry entries[MAX_INGQ + 1];
3441
3442 for (i = 0; i < ARRAY_SIZE(entries); ++i)
3443 entries[i].entry = i;
3444
3445 want = s->max_ethqsets + EXTRA_VECS;
3446 if (is_offload(adap)) {
3447 want += s->rdmaqs + s->ofldqsets;
3448 /* need nchan for each possible ULD */
3449 ofld_need = 2 * nchan;
3450 }
3451 need = adap->params.nports + EXTRA_VECS + ofld_need;
3452
3453 while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
3454 want = err;
3455
3456 if (!err) {
3457 /*
3458 * Distribute available vectors to the various queue groups.
3459 * Every group gets its minimum requirement and NIC gets top
3460 * priority for leftovers.
3461 */
3462 i = want - EXTRA_VECS - ofld_need;
3463 if (i < s->max_ethqsets) {
3464 s->max_ethqsets = i;
3465 if (i < s->ethqsets)
3466 reduce_ethqs(adap, i);
3467 }
3468 if (is_offload(adap)) {
3469 i = want - EXTRA_VECS - s->max_ethqsets;
3470 i -= ofld_need - nchan;
3471 s->ofldqsets = (i / nchan) * nchan; /* round down */
3472 }
3473 for (i = 0; i < want; ++i)
3474 adap->msix_info[i].vec = entries[i].vector;
3475 } else if (err > 0)
3476 dev_info(adap->pdev_dev,
3477 "only %d MSI-X vectors left, not using MSI-X\n", err);
3478 return err;
3479}
3480
3481#undef EXTRA_VECS
3482
static int __devinit init_rss(struct adapter *adap)
3484{
3485 unsigned int i, j;
3486
3487 for_each_port(adap, i) {
3488 struct port_info *pi = adap2pinfo(adap, i);
3489
3490 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
3491 if (!pi->rss)
3492 return -ENOMEM;
3493 for (j = 0; j < pi->rss_size; j++)
3494 pi->rss[j] = j % pi->nqsets;
3495 }
3496 return 0;
3497}
3498
static void __devinit print_port_info(struct adapter *adap)
{
	static const char *base[] = {
		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
		"KX", "KR", "KR SFP+", "KR FEC"
	};
3505
3506 int i;
3507 char buf[80];
	const char *spd = "";

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";

3515 for_each_port(adap, i) {
3516 struct net_device *dev = adap->port[i];
3517 const struct port_info *pi = netdev_priv(dev);
3518 char *bufp = buf;
3519
3520 if (!test_bit(i, &adap->registered_device_map))
3521 continue;
3522
3523 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
3524 bufp += sprintf(bufp, "100/");
3525 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
3526 bufp += sprintf(bufp, "1000/");
3527 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
3528 bufp += sprintf(bufp, "10G/");
3529 if (bufp != buf)
3530 --bufp;
3531 sprintf(bufp, "BASE-%s", base[pi->port_type]);
3532
		netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
			    adap->params.vpd.id, adap->params.rev,
			    buf, is_offload(adap) ? "R" : "",
			    adap->params.pci.width, spd,
			    (adap->flags & USING_MSIX) ? " MSI-X" :
3538 (adap->flags & USING_MSI) ? " MSI" : "");
3539 if (adap->name == dev->name)
3540 netdev_info(dev, "S/N: %s, E/C: %s\n",
3541 adap->params.vpd.sn, adap->params.vpd.ec);
3542 }
3543}
3544
Dimitris Michailidis06546392010-07-11 12:01:16 +00003545/*
3546 * Free the following resources:
3547 * - memory used for tables
3548 * - MSI/MSI-X
3549 * - net devices
3550 * - resources FW is holding for us
3551 */
3552static void free_some_resources(struct adapter *adapter)
3553{
3554 unsigned int i;
3555
3556 t4_free_mem(adapter->l2t);
3557 t4_free_mem(adapter->tids.tid_tab);
3558 disable_msi(adapter);
3559
3560 for_each_port(adapter, i)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00003561 if (adapter->port[i]) {
3562 kfree(adap2pinfo(adapter, i)->rss);
Dimitris Michailidis06546392010-07-11 12:01:16 +00003563 free_netdev(adapter->port[i]);
Dimitris Michailidis671b0062010-07-11 12:01:17 +00003564 }
Dimitris Michailidis06546392010-07-11 12:01:16 +00003565 if (adapter->flags & FW_OK)
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003566 t4_fw_bye(adapter, adapter->fn);
Dimitris Michailidis06546392010-07-11 12:01:16 +00003567}
3568
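/* Feature set propagated to VLAN devices created on top of our ports */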
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)

static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int func, i, err;
	struct port_info *pi;
	unsigned int highdma = 0;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
	if (func != ent->driver_data) {
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

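	/* Prefer 64-bit DMA and fall back to a 32-bit mask if that fails */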
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = NETIF_F_HIGHDMA;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->fn = func;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar;
	err = adap_init0(adapter);
	if (err)
		goto out_unmap_bar;

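	/* Allocate and minimally initialize a net device for each port */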
	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->rx_offload = RX_CSO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;

		netdev->features |= NETIF_F_SG | TSO_FLAGS;
		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

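	/* If we can talk to the firmware, initialize the ports' state from it */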
	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
			adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	if (is_offload(adapter))
		attach_ulds(adapter);

	print_port_info(adapter);

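/*
 * PCI functions other than the one that manages the adapter jump straight
 * here so that SR-IOV Virtual Functions can still be provisioned on them.
 */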
sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

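/*
 * Undo everything init_one() set up: SR-IOV, ULD attachments, net devices,
 * interrupt resources, and the PCI device itself.
 */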
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else if (PCI_FUNC(pdev->devfn) > 0)
		/* non-primary functions only claimed their PCI regions */
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warning("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);	/* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);