/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#define DRV_VERSION "1.0.0-ko"
#define DRV_DESC "Chelsio T4 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

#ifdef CONFIG_PCI_IOV
/*
 * Virtual Function provisioning constants.  We need two extra Ingress Queues
 * with Interrupt capability to serve as the VF's Firmware Event Queue and
 * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
 * Lists associated with them.  For each Ethernet/Control Egress Queue and
 * for each Free List, we need an Egress Context.
 */
enum {
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */

        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
        VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */

        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
};
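
/*
 * Editorial note (not in the original source): with the values above each
 * VF gets VFRES_NQSETS + 2 = 4 interrupt-capable ingress queues -- two
 * Queue Sets plus the Firmware Event Queue and the MSI-mode Forwarded
 * Interrupt Queue -- and VFRES_NQSETS * 2 = 4 egress contexts, one
 * Ethernet/Control Egress Queue and one Free List per Queue Set.
 */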

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
                                  unsigned int pf, unsigned int vf)
{
        unsigned int portn, portvec;

        /*
         * Give PFs access to all of the ports.
         */
        if (vf == 0)
                return FW_PFVF_CMD_PMASK_MASK;

        /*
         * For VFs, we'll assign them access to the ports based purely on the
         * PF.  We assign active ports in order, wrapping around if there are
         * fewer active ports than PFs: e.g. active port[pf % nports].
         * Unfortunately the adapter's port_info structs haven't been
         * initialized yet so we have to compute this.
         */
        if (adapter->params.nports == 0)
                return 0;

        portn = pf % adapter->params.nports;
        portvec = adapter->params.portvec;
        for (;;) {
                /*
                 * Isolate the lowest set bit in the port vector.  If we're at
                 * the port number that we want, return that as the pmask.
                 * Otherwise mask that bit out of the port vector and
                 * decrement our port number ...
                 */
                unsigned int pmask = portvec ^ (portvec & (portvec-1));

                if (portn == 0)
                        return pmask;
                portn--;
                portvec &= ~pmask;
        }
        /*NOTREACHED*/
}
#endif
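
/*
 * Editorial sketch (not part of the original driver): pfvfres_pmask()
 * depends on the identity v ^ (v & (v - 1)) == v & -v, which isolates the
 * lowest set bit of v.  A minimal stand-alone illustration, kept out of
 * the build on purpose:
 */
#if 0
static unsigned int lowest_set_bit(unsigned int v)
{
        /* For v = 0xa (0b1010) this returns 0x2; for v = 0 it returns 0. */
        return v ^ (v & (v - 1));
}
#endif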

enum {
        MEMWIN0_APERTURE = 65536,
        MEMWIN0_BASE     = 0x30000,
        MEMWIN1_APERTURE = 32768,
        MEMWIN1_BASE     = 0x28000,
        MEMWIN2_APERTURE = 2048,
        MEMWIN2_BASE     = 0x1b800,
};

enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES     = 16384,
        MAX_RX_BUFFERS       = 16384,
        MIN_TXQ_ENTRIES      = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES     = 128,
        MIN_FL_ENTRIES       = 16
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0xa000, 0),  /* PE10K */
        { 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");
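
/*
 * Editorial note: an illustrative (hypothetical) way to set the three
 * parameters above at module load time, shown here with the built-in
 * defaults:
 *
 *      modprobe cxgb4 msi=2 intr_holdoff=5,10,20,50,100 intr_cnt=4,8,16
 */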

static int vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

static unsigned int num_vf[4];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}
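
/*
 * Editorial note (not in the original source): set_addr_filters() batches
 * addresses in groups of up to ARRAY_SIZE(addr) (7).  Only the first
 * t4_alloc_mac_filt() call passes free == true, so stale exact-match
 * filters are released once and later batches append; addresses that don't
 * fit in exact filters accumulate in the uhash/mhash bitmaps, which are
 * OR-ed together for the final t4_set_addr_hash() call.
 */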

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}

/**
 * link_start - enable a port
 * @dev: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /*
         * We do not set address filters and promiscuity here; the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            pi->vlan_grp != NULL, true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0)
                ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
        return ret;
}

/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                        /* skip RSS header */
        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq = q->adap->sge.egr_map[qid];

                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

                if (p->type == 0)
                        t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
        return 0;
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD.  All processing is done by
 * the ULD; we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
        if (v & PFSW) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
        adap->msix_info[1].desc[n] = 0;

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i) {
                snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
                         adap->name, i);
                adap->msix_info[msi_idx++].desc[n] = 0;
        }
        for_each_rdmarxq(&adap->sge, i) {
                snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
                         adap->name, i);
                adap->msix_info[msi_idx++].desc[n] = 0;
        }
}

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        return 0;

unwind:
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
}

/**
 * write_rss - write the RSS table for a given port
 * @pi: the port
 * @queues: array of queue indices for RSS
 *
 * Sets up the portion of the HW RSS table for the port's VI to distribute
 * packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;

        err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
}

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                err = write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}

/**
 * setup_sge_queues - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}

/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
        int ret;
        u32 vers;
        const struct fw_hdr *hdr;
        const struct firmware *fw;
        struct device *dev = adap->pdev_dev;

        ret = request_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "unable to load firmware image " FW_FNAME
                        ", error %d\n", ret);
                return ret;
        }

        hdr = (const struct fw_hdr *)fw->data;
        vers = ntohl(hdr->fw_ver);
        if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
                ret = -EINVAL;              /* wrong major version, won't do */
                goto out;
        }

        /*
         * If the flash FW is unusable or we found something newer, load it.
         */
        if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
            vers > adap->params.fw_vers) {
                ret = -t4_load_fw(adap, fw->data, fw->size);
                if (!ret)
                        dev_info(dev, "firmware upgraded to version %pI4 from "
                                 FW_FNAME "\n", &hdr->fw_ver);
        }
out:    release_firmware(fw);
        return ret;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kmalloc(size, GFP_KERNEL);

        if (!p)
                p = vmalloc(size);
        if (p)
                memset(p, 0, size);
        return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
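
/*
 * Editorial sketch (not part of the original driver): t4_alloc_mem() may
 * return either kmalloc or vmalloc memory, so callers must release it with
 * t4_free_mem(), which dispatches on is_vmalloc_addr().  A hypothetical
 * caller, kept out of the build:
 */
#if 0
static int example_alloc_table(unsigned int nentries)
{
        u64 *tbl = t4_alloc_mem(nentries * sizeof(u64));   /* zero-filled */

        if (!tbl)
                return -ENOMEM;
        /* ... populate and use tbl ... */
        t4_free_mem(tbl);      /* correct for both kmalloc and vmalloc */
        return 0;
}
#endif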

static inline int is_offload(const struct adapter *adap)
{
        return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
        return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK ",
        "TxFramesOK ",
        "TxBroadcastFrames ",
        "TxMulticastFrames ",
        "TxUnicastFrames ",
        "TxErrorFrames ",

        "TxFrames64 ",
        "TxFrames65To127 ",
        "TxFrames128To255 ",
        "TxFrames256To511 ",
        "TxFrames512To1023 ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax ",

        "TxFramesDropped ",
        "TxPauseFrames ",
        "TxPPP0Frames ",
        "TxPPP1Frames ",
        "TxPPP2Frames ",
        "TxPPP3Frames ",
        "TxPPP4Frames ",
        "TxPPP5Frames ",
        "TxPPP6Frames ",
        "TxPPP7Frames ",

        "RxOctetsOK ",
        "RxFramesOK ",
        "RxBroadcastFrames ",
        "RxMulticastFrames ",
        "RxUnicastFrames ",

        "RxFramesTooLong ",
        "RxJabberErrors ",
        "RxFCSErrors ",
        "RxLengthErrors ",
        "RxSymbolErrors ",
        "RxRuntFrames ",

        "RxFrames64 ",
        "RxFrames65To127 ",
        "RxFrames128To255 ",
        "RxFrames256To511 ",
        "RxFrames512To1023 ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax ",

        "RxPauseFrames ",
        "RxPPP0Frames ",
        "RxPPP1Frames ",
        "RxPPP2Frames ",
        "RxPPP3Frames ",
        "RxPPP4Frames ",
        "RxPPP5Frames ",
        "RxPPP6Frames ",
        "RxPPP7Frames ",

        "RxBG0FramesDropped ",
        "RxBG1FramesDropped ",
        "RxBG2FramesDropped ",
        "RxBG3FramesDropped ",
        "RxBG0FramesTrunc ",
        "RxBG1FramesTrunc ",
        "RxBG2FramesTrunc ",
        "RxBG3FramesTrunc ",

        "TSO ",
        "TxCsumOffload ",
        "RxCsumGood ",
        "VLANextractions ",
        "VLANinsertions ",
        "GROpackets ",
        "GROmerged ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T4_REGMAP_SIZE (160 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T4_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = netdev2adap(dev);

        strcpy(info->driver, KBUILD_MODNAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));

        if (!adapter->params.fw_vers)
                strcpy(info->fw_version, "N/A");
        else
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%u.%u.%u.%u, TP %u.%u.%u.%u",
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
        u64 tso;
        u64 tx_csum;
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
        u64 gro_pkts;
        u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
                const struct port_info *p, struct queue_port_stats *s)
{
        int i;
        const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
        const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

        memset(s, 0, sizeof(*s));
        for (i = 0; i < p->nqsets; i++, rx++, tx++) {
                s->tso += tx->tso;
                s->tx_csum += tx->tx_cso;
                s->rx_csum += rx->stats.rx_cso;
                s->vlan_ex += rx->stats.vlan_ex;
                s->vlan_ins += tx->vlan_ins;
                s->gro_pkts += rx->stats.lro_pkts;
                s->gro_merged += rx->stats.lro_merged;
        }
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

        data += sizeof(struct port_stats) / sizeof(u64);
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
        return 4 | (ap->params.rev << 10) | (1 << 16);
}
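
/*
 * Editorial note: for a T4 (chip version 4) rev-1 part, mk_adap_vers()
 * returns 4 | (1 << 10) | (1 << 16) == 0x10404 -- chip version in bits
 * 0..9, chip revision in bits 10..15, register dump version in bits
 * 16..23, matching the scheme documented above.
 */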

static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
                           unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        static const unsigned int reg_ranges[] = {
                0x1008, 0x1108,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1300, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x30d8,
                0x30e0, 0x5924,
                0x5960, 0x59d4,
                0x5a00, 0x5af8,
                0x6000, 0x6098,
                0x6100, 0x6150,
                0x6200, 0x6208,
                0x6240, 0x6248,
                0x6280, 0x6338,
                0x6370, 0x638c,
                0x6400, 0x643c,
                0x6500, 0x6524,
                0x6a00, 0x6a38,
                0x6a60, 0x6a78,
                0x6b00, 0x6b84,
                0x6bf0, 0x6c84,
                0x6cf0, 0x6d84,
                0x6df0, 0x6e84,
                0x6ef0, 0x6f84,
                0x6ff0, 0x7084,
                0x70f0, 0x7184,
                0x71f0, 0x7284,
                0x72f0, 0x7384,
                0x73f0, 0x7450,
                0x7500, 0x7530,
                0x7600, 0x761c,
                0x7680, 0x76cc,
                0x7700, 0x7798,
                0x77c0, 0x77fc,
                0x7900, 0x79fc,
                0x7b00, 0x7c38,
                0x7d00, 0x7efc,
                0x8dc0, 0x8e1c,
                0x8e30, 0x8e78,
                0x8ea0, 0x8f6c,
                0x8fc0, 0x9074,
                0x90fc, 0x90fc,
                0x9400, 0x9458,
                0x9600, 0x96bc,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0x9fec,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0xea7c,
                0xf000, 0x11190,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x1924c,
                0x193f8, 0x19474,
                0x19490, 0x194f8,
                0x19800, 0x19f30,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e28c,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e68c,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea8c,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee8c,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f28c,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f68c,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa8c,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe8c,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x20000, 0x2002c,
                0x20100, 0x2013c,
                0x20190, 0x201c8,
                0x20200, 0x20318,
                0x20400, 0x20528,
                0x20540, 0x20614,
                0x21000, 0x21040,
                0x2104c, 0x21060,
                0x210c0, 0x210ec,
                0x21200, 0x21268,
                0x21270, 0x21284,
                0x212fc, 0x21388,
                0x21400, 0x21404,
                0x21500, 0x21518,
                0x2152c, 0x2153c,
                0x21550, 0x21554,
                0x21600, 0x21600,
                0x21608, 0x21628,
                0x21630, 0x2163c,
                0x21700, 0x2171c,
                0x21780, 0x2178c,
                0x21800, 0x21c38,
                0x21c80, 0x21d7c,
                0x21e00, 0x21e04,
                0x22000, 0x2202c,
                0x22100, 0x2213c,
                0x22190, 0x221c8,
                0x22200, 0x22318,
                0x22400, 0x22528,
                0x22540, 0x22614,
                0x23000, 0x23040,
                0x2304c, 0x23060,
                0x230c0, 0x230ec,
                0x23200, 0x23268,
                0x23270, 0x23284,
                0x232fc, 0x23388,
                0x23400, 0x23404,
                0x23500, 0x23518,
                0x2352c, 0x2353c,
                0x23550, 0x23554,
                0x23600, 0x23600,
                0x23608, 0x23628,
                0x23630, 0x2363c,
                0x23700, 0x2371c,
                0x23780, 0x2378c,
                0x23800, 0x23c38,
                0x23c80, 0x23d7c,
                0x23e00, 0x23e04,
                0x24000, 0x2402c,
                0x24100, 0x2413c,
                0x24190, 0x241c8,
                0x24200, 0x24318,
                0x24400, 0x24528,
                0x24540, 0x24614,
                0x25000, 0x25040,
                0x2504c, 0x25060,
                0x250c0, 0x250ec,
                0x25200, 0x25268,
                0x25270, 0x25284,
                0x252fc, 0x25388,
                0x25400, 0x25404,
                0x25500, 0x25518,
                0x2552c, 0x2553c,
                0x25550, 0x25554,
                0x25600, 0x25600,
                0x25608, 0x25628,
                0x25630, 0x2563c,
                0x25700, 0x2571c,
                0x25780, 0x2578c,
                0x25800, 0x25c38,
                0x25c80, 0x25d7c,
                0x25e00, 0x25e04,
                0x26000, 0x2602c,
                0x26100, 0x2613c,
                0x26190, 0x261c8,
                0x26200, 0x26318,
                0x26400, 0x26528,
                0x26540, 0x26614,
                0x27000, 0x27040,
                0x2704c, 0x27060,
                0x270c0, 0x270ec,
                0x27200, 0x27268,
                0x27270, 0x27284,
                0x272fc, 0x27388,
                0x27400, 0x27404,
                0x27500, 0x27518,
                0x2752c, 0x2753c,
                0x27550, 0x27554,
                0x27600, 0x27600,
                0x27608, 0x27628,
                0x27630, 0x2763c,
                0x27700, 0x2771c,
                0x27780, 0x2778c,
                0x27800, 0x27c38,
                0x27c80, 0x27d7c,
                0x27e00, 0x27e04
        };

        int i;
        struct adapter *ap = netdev2adap(dev);

        regs->version = mk_adap_vers(ap);

        memset(buf, 0, T4_REGMAP_SIZE);
        for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
                reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_cfg.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
        return 0;
}

static int identify_port(struct net_device *dev, u32 data)
{
        struct adapter *adap = netdev2adap(dev);

        if (data == 0)
                data = 2;     /* default to 2 seconds */

        return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid,
                                data * 5);
}

static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
        unsigned int v = 0;

        if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
            type == FW_PORT_TYPE_BT_XAUI) {
                v |= SUPPORTED_TP;
                if (caps & FW_PORT_CAP_SPEED_100M)
                        v |= SUPPORTED_100baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseT_Full;
        } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
                v |= SUPPORTED_Backplane;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseKX_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseKX4_Full;
        } else if (type == FW_PORT_TYPE_KR)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
        else if (type == FW_PORT_TYPE_BP_AP)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC;
        else if (type == FW_PORT_TYPE_FIBER_XFI ||
                 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
                v |= SUPPORTED_FIBRE;

        if (caps & FW_PORT_CAP_ANEG)
                v |= SUPPORTED_Autoneg;
        return v;
}

static unsigned int to_fw_linkcaps(unsigned int caps)
{
        unsigned int v = 0;

        if (caps & ADVERTISED_100baseT_Full)
                v |= FW_PORT_CAP_SPEED_100M;
        if (caps & ADVERTISED_1000baseT_Full)
                v |= FW_PORT_CAP_SPEED_1G;
        if (caps & ADVERTISED_10000baseT_Full)
                v |= FW_PORT_CAP_SPEED_10G;
        return v;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        const struct port_info *p = netdev_priv(dev);

        if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
            p->port_type == FW_PORT_TYPE_BT_XFI ||
            p->port_type == FW_PORT_TYPE_BT_XAUI)
                cmd->port = PORT_TP;
        else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
                 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
                cmd->port = PORT_FIBRE;
        else if (p->port_type == FW_PORT_TYPE_SFP) {
                if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
                    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
                        cmd->port = PORT_DA;
                else
                        cmd->port = PORT_FIBRE;
        } else
                cmd->port = PORT_OTHER;

        if (p->mdio_addr >= 0) {
                cmd->phy_address = p->mdio_addr;
                cmd->transceiver = XCVR_EXTERNAL;
                cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
                        MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
        } else {
                cmd->phy_address = 0;  /* not really, but no better option */
                cmd->transceiver = XCVR_INTERNAL;
                cmd->mdio_support = 0;
        }

        cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
        cmd->advertising = from_fw_linkcaps(p->port_type,
                                            p->link_cfg.advertising);
        cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
        cmd->duplex = DUPLEX_FULL;
        cmd->autoneg = p->link_cfg.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

static unsigned int speed_to_caps(int speed)
{
        if (speed == SPEED_100)
                return FW_PORT_CAP_SPEED_100M;
        if (speed == SPEED_1000)
                return FW_PORT_CAP_SPEED_1G;
        if (speed == SPEED_10000)
                return FW_PORT_CAP_SPEED_10G;
        return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        unsigned int cap;
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;

        if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
                return -EINVAL;

        if (!(lc->supported & FW_PORT_CAP_ANEG)) {
                /*
                 * PHY offers a single speed.  See if that's what's
                 * being requested.
                 */
                if (cmd->autoneg == AUTONEG_DISABLE &&
                    (lc->supported & speed_to_caps(cmd->speed)))
                        return 0;
                return -EINVAL;
        }

        if (cmd->autoneg == AUTONEG_DISABLE) {
                cap = speed_to_caps(cmd->speed);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
                    cmd->speed == SPEED_10000)
                        return -EINVAL;
                lc->requested_speed = cap;
                lc->advertising = 0;
        } else {
                cap = to_fw_linkcaps(cmd->advertising);
                if (!(lc->supported & cap))
                        return -EINVAL;
                lc->requested_speed = 0;
                lc->advertising = cap | FW_PORT_CAP_ANEG;
        }
        lc->autoneg = cmd->autoneg;

        if (netif_running(dev))
                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
                                     lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & FW_PORT_CAP_ANEG)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (netif_running(dev))
                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
                                     lc);
        return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        return p->rx_offload & RX_CSO;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct port_info *p = netdev_priv(dev);

        if (data)
                p->rx_offload |= RX_CSO;
        else
                p->rx_offload &= ~RX_CSO;
        return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct sge *s = &pi->adapter->sge;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
        e->rx_jumbo_max_pending = 0;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
        e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
        e->rx_jumbo_pending = 0;
        e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        int i;
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;

        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        for (i = 0; i < pi->nqsets; ++i) {
                s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
                s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
                s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
        }
        return 0;
}

static int closest_timer(const struct sge *s, int time)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

static int closest_thres(const struct sge *s, int thres)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
                               const struct sge_rspq *q)
{
        unsigned int idx = q->intr_params >> 1;

        return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
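
/*
 * Editorial note (inferred from the code): intr_params packs both hold-off
 * controls into one field -- bit 0 is QINTR_CNT_EN (packet-count enable)
 * and the timer index lives in the bits above it, hence the ">> 1" in
 * qtimer_val().  set_rxq_intr_params() below is the encoding side.
 */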

/**
 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
 * @adap: the adapter
 * @q: the Rx queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an Rx queue's interrupt hold-off time and packet count.  At least
 * one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
                               unsigned int us, unsigned int cnt)
{
        if ((us | cnt) == 0)
                cnt = 1;

        if (cnt) {
                int err;
                u32 v, new_idx;

                new_idx = closest_thres(&adap->sge, cnt);
                if (q->desc && q->pktcnt_idx != new_idx) {
                        /* the queue has already been created, update it */
                        v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            FW_PARAMS_PARAM_YZ(q->cntxt_id);
                        err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
                                            &new_idx);
                        if (err)
                                return err;
                }
                q->pktcnt_idx = new_idx;
        }

        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
        q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
                        c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
}
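
/*
 * Editorial note: set_coalesce()/get_coalesce() back the standard ethtool
 * coalescing interface, e.g. (illustrative values):
 *
 *      ethtool -C eth0 rx-usecs 50 rx-frames 8
 *
 * Requested values are snapped to the nearest entries of the adapter's
 * sge.timer_val[] and sge.counter_val[] tables by closest_timer() and
 * closest_thres() above.
 */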

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct adapter *adap = pi->adapter;
        const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

        c->rx_coalesce_usecs = qtimer_val(adap, rq);
        c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
                adap->sge.counter_val[rq->pktcnt_idx] : 0;
        return 0;
}

/*
 * Translate a physical EEPROM address to virtual.  The first 1K is accessed
 * through virtual addresses starting at 31K, the rest is accessed through
 * virtual addresses starting at 0.  This mapping is correct only for PF0.
 */
static int eeprom_ptov(unsigned int phys_addr)
{
        if (phys_addr < 1024)
                return phys_addr + (31 << 10);
        if (phys_addr < EEPROMSIZE)
                return phys_addr - 1024;
        return -EINVAL;
}
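
/*
 * Editorial note, worked examples of the mapping above: phys 0x000 ->
 * virt 0x7c00 (31K), phys 0x3ff -> virt 0x7fff, phys 0x400 -> virt 0x000,
 * and any phys >= EEPROMSIZE yields -EINVAL.
 */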

/*
 * The next two routines implement eeprom read/write from physical addresses.
 * The physical->virtual translation is correct only for PF0.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
        int vaddr = eeprom_ptov(phys_addr);

        if (vaddr >= 0)
                vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
        return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
        int vaddr = eeprom_ptov(phys_addr);

        if (vaddr >= 0)
                vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
        return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i, err = 0;
        struct adapter *adapter = netdev2adap(dev);

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 *data)
{
        u8 *buf;
        int err = 0;
        u32 aligned_offset, aligned_len, *p;
        struct adapter *adapter = netdev2adap(dev);

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                /*
                 * RMW possibly needed for first or last words.
                 */
                buf = kmalloc(aligned_len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
                if (!err && aligned_len > 4)
                        err = eeprom_rd_phys(adapter,
                                             aligned_offset + aligned_len - 4,
                                             (u32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
        } else
                buf = data;

        err = t4_seeprom_wp(adapter, false);
        if (err)
                goto out;

        for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = eeprom_wr_phys(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t4_seeprom_wp(adapter, true);
out:
        if (buf != data)
                kfree(buf);
        return err;
}

static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
        int ret;
        const struct firmware *fw;
        struct adapter *adap = netdev2adap(netdev);

        ef->data[sizeof(ef->data) - 1] = '\0';
        ret = request_firmware(&fw, ef->data, adap->pdev_dev);
        if (ret < 0)
                return ret;

        ret = t4_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);
        if (!ret)
                dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
        return ret;
}

#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        wol->supported = WAKE_BCAST | WAKE_MAGIC;
        wol->wolopts = netdev2adap(dev)->wol;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        int err = 0;
        struct port_info *pi = netdev_priv(dev);

        if (wol->wolopts & ~WOL_SUPPORTED)
                return -EINVAL;
        t4_wol_magic_enable(pi->adapter, pi->tx_chan,
                            (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
        if (wol->wolopts & WAKE_BCAST) {
                err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
                                        ~0ULL, 0, false);
                if (!err)
                        err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
                                                ~6ULL, ~0ULL, BCAST_CRC, true);
        } else
                t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
        return err;
}
1818
Dimitris Michailidis35d35682010-08-02 13:19:20 +00001819#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1820
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001821static int set_tso(struct net_device *dev, u32 value)
1822{
1823 if (value)
Dimitris Michailidis35d35682010-08-02 13:19:20 +00001824 dev->features |= TSO_FLAGS;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001825 else
Dimitris Michailidis35d35682010-08-02 13:19:20 +00001826 dev->features &= ~TSO_FLAGS;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001827 return 0;
1828}
1829
Dimitris Michailidis87b6cf52010-04-27 16:22:42 -07001830static int set_flags(struct net_device *dev, u32 flags)
1831{
Ben Hutchings1437ce32010-06-30 02:44:32 +00001832 return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
Dimitris Michailidis87b6cf52010-04-27 16:22:42 -07001833}
1834
Dimitris Michailidis671b0062010-07-11 12:01:17 +00001835static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
1836{
1837 const struct port_info *pi = netdev_priv(dev);
1838 unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
1839
1840 p->size = pi->rss_size;
1841 while (n--)
1842 p->ring_index[n] = pi->rss[n];
1843 return 0;
1844}
1845
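/*
 * Install a new RSS indirection table.  Each of the table's pi->rss_size
 * entries selects one of the port's pi->nqsets Rx queues; all entries are
 * validated before any are copied, so a bad table leaves the old mapping
 * intact.  The table is pushed to HW only once the adapter is fully
 * initialized.
 */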
static int set_rss_table(struct net_device *dev,
			 const struct ethtool_rxfh_indir *p)
{
	unsigned int i;
	struct port_info *pi = netdev_priv(dev);

	if (p->size != pi->rss_size)
		return -EINVAL;
	for (i = 0; i < p->size; i++)
		if (p->ring_index[i] >= pi->nqsets)
			return -EINVAL;
	for (i = 0; i < p->size; i++)
		pi->rss[i] = p->ring_index[i];
	if (pi->adapter->flags & FULL_INIT_DONE)
		return write_rss(pi, pi->rss);
	return 0;
}

static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     void *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}

static struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.set_eeprom        = set_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_rx_csum       = get_rx_csum,
	.set_rx_csum       = set_rx_csum,
	.set_tx_csum       = ethtool_op_set_tx_ipv6_csum,
	.set_sg            = ethtool_op_set_sg,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.phys_id           = identify_port,
	.nway_reset        = restart_autoneg,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_wol           = get_wol,
	.set_wol           = set_wol,
	.set_tso           = set_tso,
	.set_flags         = set_flags,
	.get_rxnfc         = get_rxnfc,
	.get_rxfh_indir    = get_rss_table,
	.set_rxfh_indir    = set_rss_table,
	.flash_device      = set_flash,
};

/*
 * debugfs support
 */

static int mem_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file->f_path.dentry->d_inode->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct adapter *adap = file->private_data - mem;
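	/*
	 * private_data is the adapter pointer with the target memory's
	 * index (MEM_EDC0/MEM_EDC1/MEM_MC) tagged into its low two bits;
	 * see add_debugfs_mem() below for the encoding side.
	 */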

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];

		if (mem == MEM_MC)
			ret = t4_mc_read(adap, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mem, pos, data, NULL);
		if (ret)
			return ret;

		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	count = pos - *ppos;
	*ppos = pos;
	return count;
}

static const struct file_operations mem_debugfs_fops = {
	.owner = THIS_MODULE,
	.open  = mem_open,
	.read  = mem_read,
};

static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
				      unsigned int idx, unsigned int size_mb)
{
	struct dentry *de;

	de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
				 (void *)adap + idx, &mem_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = size_mb << 20;
}

static int __devinit setup_debugfs(struct adapter *adap)
{
	int i;

	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
	if (i & EDRAM0_ENABLE)
		add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
	if (i & EDRAM1_ENABLE)
		add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
	if (i & EXT_MEM_ENABLE)
		add_debugfs_mem(adap, "mc", MEM_MC,
			EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
	if (adap->l2t)
		debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
				    &t4_l2t_fops);
	return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = p - t->atid_tab;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
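
/*
 * Typical pairing from a ULD's active-open path (illustrative sketch;
 * "my_conn" is a hypothetical per-connection cookie):
 *
 *	atid = cxgb4_alloc_atid(t, my_conn);
 *	if (atid < 0)
 *		back off, the table is exhausted;
 *	...
 *	cxgb4_free_atid(t, atid);
 */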

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
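
/*
 * Note that non-IPv4 families allocate an order-2 bitmap region above,
 * i.e. four consecutive stids, which is why the matching free below
 * releases a region of the same order.
 */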

/*
 * Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	stid -= t->stid_base;
	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 2);
	t->stid_tab[stid].data = NULL;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
			     unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		schedule_work(&adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
EXPORT_SYMBOL(cxgb4_queue_tid_release);
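
/*
 * The deferred-release list threads through tid_tab itself: each pending
 * entry's tid_tab slot points at the next pending slot, with the Tx
 * channel stashed in the low two bits of the pointer (tid_tab entries are
 * pointer-aligned, so those bits are otherwise zero).
 */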

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	void *old;
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	old = t->tid_tab[tid];
	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		t->tid_tab[tid] = NULL;
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
	if (old)
		atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int natids = t->natids;

	size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       BITS_TO_LONGS(t->nstids) * sizeof(long);
	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

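	/*
	 * The single allocation above is carved up as:
	 *
	 *	[ tid_tab | atid_tab | stid_tab | stid_bmap ]
	 */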
	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids);
	return 0;
}

/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
	req->opt0 = cpu_to_be64(TX_CHAN(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
	return t4_mgmt_tx(adap, skb);
}
EXPORT_SYMBOL(cxgb4_create_server);

/**
 *	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
	req->opt0 = cpu_to_be64(TX_CHAN(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
	return t4_mgmt_tx(adap, skb);
}
EXPORT_SYMBOL(cxgb4_create_server6);

/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
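
/*
 * Illustrative use (table values hypothetical): with an MTU table
 * containing ..., 1492, 1500, 2002, ..., a request for 1500 returns 1500,
 * and a request for 1600 also returns 1500, the largest entry not
 * exceeding it:
 *
 *	unsigned int idx;
 *	unsigned int mtu = cxgb4_best_mtu(adap->params.mtus, dev->mtu, &idx);
 */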

/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

/**
 *	cxgb4_netdev_by_hwid - return the net device of a HW port
 *	@pdev: identifies the adapter
 *	@id: the HW port id
 *
 *	Return the net device associated with the interface with the given HW
 *	id.
 */
struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
{
	const struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap || id >= NCHAN)
		return NULL;
	id = adap->chan_map[id];
	return id < MAX_NPORTS ? adap->port[id] : NULL;
}
EXPORT_SYMBOL(cxgb4_netdev_by_hwid);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
		     HPZ3(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_PMTU_UPDATE:
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	lli.pdev = adap->pdev;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.nrxq = adap->sge.rdmaqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.ofld_rxq;
		lli.nrxq = adap->sge.ofldqsets;
	}
	lli.ntxq = adap->sge.ofldqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.rev;
	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
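	/*
	 * Each PF owns a 4-bit queues-per-page field in the two registers
	 * below; shift the register down to this function's nibble before
	 * decoding.
	 */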
	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
	lli.fw_vers = adap->params.fw_vers;

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}

static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}

static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}

/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
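
/*
 * Registration sketch for a hypothetical ULD (handler names illustrative;
 * the ops structure is struct cxgb4_uld_info from cxgb4.h, other fields
 * omitted):
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.add = my_add,
 *		.rx_handler = my_rx_handler,
 *		.state_change = my_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 */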

/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}

static void cxgb_down(struct adapter *adapter)
{
	t4_intr_disable(adapter);
	cancel_work_sync(&adapter->tid_release_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	if (adapter->flags & USING_MSIX) {
		free_msix_queue_irqs(adapter);
		free_irq(adapter->msix_info[0].vec, adapter);
	} else
		free_irq(adapter->pdev->irq, adapter);
	quiesce_rx(adapter);
	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	dev->real_num_tx_queues = pi->nqsets;
	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}

static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	spin_lock(&adapter->stats_lock);
	t4_get_port_stats(adapter, p->tx_chan, &stats);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->fn;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
		return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);

	pi->vlan_grp = grp;
	t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
		      grp != NULL, true);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif

static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
};

void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}

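/*
 * Set up the fixed PCIe memory access windows (plus a fourth window for
 * on-chip queue memory when present).  The WINDOW() field encodes
 * log2(aperture size) - 10, i.e. the aperture size expressed in KB.
 */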
static void setup_memwin(struct adapter *adap)
{
	u32 bar0;

	bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
		     (bar0 + MEMWIN0_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
		     (bar0 + MEMWIN1_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
		     (bar0 + MEMWIN2_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
	if (adap->vres.ocq.size) {
		unsigned int start, sz_kb;

		start = pci_resource_start(adap->pdev, 2) +
			OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
	}
}

static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST | FW_CMD_READ);
	c->retval_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	/* select capabilities we'll be using */
	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
		return -EINVAL;	/* ret is 0 here; don't report success */
	}
	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->fn,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
	v = t4_read_reg(adap, TP_PIO_DATA);
	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);

	/* get basic stuff going */
	return t4_early_init(adap, adap->fn);
}

/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd c;

	ret = t4_check_fw_version(adap);
	if (ret == -EINVAL || ret > 0) {
		if (upgrade_fw(adap) >= 0)             /* recache FW version */
			ret = t4_check_fw_version(adap);
	}
	if (ret < 0)
		return ret;

	/* contact FW, request master */
	ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}

	/* reset device */
	ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
	if (ret < 0)
		goto bye;

	for (v = 0; v < SGE_NTIMERS - 1; v++)
		adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
	adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
	adap->sge.counter_val[0] = 1;
	for (v = 1; v < SGE_NCOUNTERS; v++)
		adap->sge.counter_val[v] = min(intr_cnt[v - 1],
					       THRESHOLD_3_MASK);
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

	params[0] = FW_PARAM_DEV(CCLK);
	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
	if (ret < 0)
		goto bye;
	adap->params.vpd.cclk = val[0];

	ret = adap_init1(adap, &c);
	if (ret < 0)
		goto bye;

#define FW_PARAM_PFVF(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
	 FW_PARAMS_PARAM_Y(adap->fn))

	params[0] = FW_PARAM_DEV(PORTVEC);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val);
	if (ret < 0)
		goto bye;
	port_vec = val[0];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;

	if (c.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];
		adap->params.offload = 1;
	}
	if (c.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;
	}
	if (c.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;
	adap->flags |= FW_OK;

	/* These are finalized by FW initialization, load their values now */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

#ifdef CONFIG_PCI_IOV
	/*
	 * Provision resource limits for Virtual Functions.  We currently
	 * grant them all the same static resource limits except for the Port
	 * Access Rights Mask which we're assigning based on the PF.  All of
	 * the static provisioning stuff for both the PF and VF really needs
	 * to be managed in a persistent manner for each device which the
	 * firmware controls.
	 */
	{
		int pf, vf;

		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
			if (num_vf[pf] <= 0)
				continue;

			/* VF numbering starts at 1! */
			for (vf = 1; vf <= num_vf[pf]; vf++) {
				ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
						  VFRES_NEQ, VFRES_NETHCTRL,
						  VFRES_NIQFLINT, VFRES_NIQ,
						  VFRES_TC, VFRES_NVI,
						  FW_PFVF_CMD_CMASK_MASK,
						  pfvfres_pmask(adap, pf, vf),
						  VFRES_NEXACTF,
						  VFRES_R_CAPS, VFRES_WX_CAPS);
				if (ret < 0)
					dev_warn(adap->pdev_dev, "failed to "
						 "provision pf/vf=%d/%d; "
						 "err=%d\n", pf, vf, ret);
			}
		}
	}
#endif

	setup_memwin(adap);
	return 0;

	/*
	 * If a command timed out or failed with EIO, the FW is not operating
	 * within its spec or something catastrophic happened to the HW/FW;
	 * stop issuing commands.
	 */
bye:	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->fn);
	return ret;
}

/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	pci_disable_device(pdev);
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};

static inline bool is_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}

static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void __devinit cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue set per non-10G port and up to as many
	 * queue sets as there are CPU cores per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > num_online_cpus())
		q10g = num_online_cpus();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}
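
/*
 * Worked example (illustrative, assuming MAX_ETH_QSETS is 32): a 2-port
 * 10G adapter on an 8-core machine gets q10g = min(32 / 2, 8) = 8 queue
 * sets per port, i.e. 16 Ethernet queue sets in total, plus the offload
 * and RDMA queues above when offload is enabled.
 */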

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void __devinit reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int __devinit enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, err, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;	/* round down */
		}
		for (i = 0; i < want; ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(adap->pdev_dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
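
/*
 * Continuing the example above (16 Ethernet qsets, 8 offload qsets and 2
 * RDMA queues on 2 ports), enable_msix() first asks for
 * want = 16 + 2 + 8 + 2 = 28 vectors and can fall back as far as
 * need = 2 + 2 + 2*2 = 8 before giving up on MSI-X.
 */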
3470
3471#undef EXTRA_VECS
3472
Dimitris Michailidis671b0062010-07-11 12:01:17 +00003473static int __devinit init_rss(struct adapter *adap)
3474{
3475 unsigned int i, j;
3476
3477 for_each_port(adap, i) {
3478 struct port_info *pi = adap2pinfo(adap, i);
3479
3480 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
3481 if (!pi->rss)
3482 return -ENOMEM;
3483 for (j = 0; j < pi->rss_size; j++)
3484 pi->rss[j] = j % pi->nqsets;
3485 }
3486 return 0;
3487}
3488
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003489static void __devinit print_port_info(struct adapter *adap)
3490{
3491 static const char *base[] = {
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00003492 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
3493 "KX", "KR", "KR SFP+", "KR FEC"
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003494 };
3495
3496 int i;
3497 char buf[80];
Dimitris Michailidisf1a051b2010-05-10 15:58:08 +00003498 const char *spd = "";
3499
3500 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
3501 spd = " 2.5 GT/s";
3502 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
3503 spd = " 5 GT/s";
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003504
3505 for_each_port(adap, i) {
3506 struct net_device *dev = adap->port[i];
3507 const struct port_info *pi = netdev_priv(dev);
3508 char *bufp = buf;
3509
3510 if (!test_bit(i, &adap->registered_device_map))
3511 continue;
3512
3513 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
3514 bufp += sprintf(bufp, "100/");
3515 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
3516 bufp += sprintf(bufp, "1000/");
3517 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
3518 bufp += sprintf(bufp, "10G/");
		if (bufp != buf)
			--bufp;
		sprintf(bufp, "BASE-%s", base[pi->port_type]);

		netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
			    adap->params.vpd.id, adap->params.rev,
			    buf, is_offload(adap) ? "R" : "",
			    adap->params.pci.width, spd,
			    (adap->flags & USING_MSIX) ? " MSI-X" :
			    (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name)
			netdev_info(dev, "S/N: %s, E/C: %s\n",
				    adap->params.vpd.sn, adap->params.vpd.ec);
	}
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}

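/* Feature set inherited by a port's VLAN child devices */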
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)

static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int func, i, err;
	struct port_info *pi;
	unsigned int highdma = 0;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
	if (func != ent->driver_data) {
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

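	/*
	 * Prefer a 64-bit DMA mask so buffers may sit anywhere in memory;
	 * fall back to 32-bit DMA if the platform cannot provide it.
	 */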
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = NETIF_F_HIGHDMA;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->fn = func;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar;
	err = adap_init0(adapter);
	if (err)
		goto out_unmap_bar;

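	/*
	 * Allocate and set up a net device for each port.  Each is created
	 * with room for the maximum number of Ethernet queue sets; the
	 * number actually used is decided later by cfg_queues().
	 */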
	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->rx_offload = RX_CSO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;

		netdev->features |= NETIF_F_SG | TSO_FLAGS;
		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now; they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/*
	 * See what interrupts we'll be using.  The msi module parameter
	 * sets the preference: >1 tries MSI-X first, 1 tries MSI, and if
	 * neither can be enabled we stay on legacy INTx.
	 */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However, we
	 * must register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
			adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	if (is_offload(adapter))
		attach_ulds(adapter);

	print_port_info(adapter);

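	/*
	 * Physical functions other than the one that owns the network
	 * devices arrive here directly; all they do is provision any SR-IOV
	 * Virtual Functions requested through the num_vf module parameter.
	 */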
sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else if (PCI_FUNC(pdev->devfn) > 0)
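		/* SR-IOV-only functions still hold the PCI regions requested
		 * in init_one(), so release them here. */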
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional; just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warning("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);	/* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);