/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#define DRV_VERSION "1.3.0-ko"
#define DRV_DESC "Chelsio T4 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

#ifdef CONFIG_PCI_IOV
/*
 * Virtual Function provisioning constants.  We need two extra Ingress Queues
 * with Interrupt capability to serve as the VF's Firmware Event Queue and
 * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
 * Lists associated with them.  For each Ethernet/Control Egress Queue and
 * for each Free List, we need an Egress Context.
 */
enum {
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */

        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
        VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */

        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
};
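
/*
 * A worked example of the arithmetic above (illustrative only, not used by
 * the code): with VFRES_NQSETS = 2, each VF gets 2 Ethernet Egress Queues
 * plus 2 Free Lists, i.e. VFRES_NEQ = 2*2 = 4 Egress Contexts, and
 * VFRES_NIQFLINT = 2+2 = 4 interrupt-capable Ingress Queues: the two Queue
 * Sets' Rx queues (with Free Lists) plus the Firmware Event Queue and the
 * Forwarded Interrupt Queue (without).
 */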

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
                                  unsigned int pf, unsigned int vf)
{
        unsigned int portn, portvec;

        /*
         * Give PFs access to all of the ports.
         */
        if (vf == 0)
                return FW_PFVF_CMD_PMASK_MASK;

        /*
         * For VFs, we'll assign them access to the ports based purely on the
         * PF.  We assign active ports in order, wrapping around if there are
         * fewer active ports than PFs: e.g. active port[pf % nports].
         * Unfortunately the adapter's port_info structs haven't been
         * initialized yet so we have to compute this.
         */
        if (adapter->params.nports == 0)
                return 0;

        portn = pf % adapter->params.nports;
        portvec = adapter->params.portvec;
        for (;;) {
                /*
                 * Isolate the lowest set bit in the port vector.  If we're at
                 * the port number that we want, return that as the pmask;
                 * otherwise mask that bit out of the port vector and
                 * decrement our port number ...
                 */
                unsigned int pmask = portvec ^ (portvec & (portvec-1));
                if (portn == 0)
                        return pmask;
                portn--;
                portvec &= ~pmask;
        }
        /*NOTREACHED*/
}
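
/*
 * A quick illustration of the bit trick above (a sketch, not part of the
 * driver): for portvec = 0b1010, portvec & (portvec - 1) clears the lowest
 * set bit, giving 0b1000, and XOR-ing that with the original value leaves
 * exactly that bit: 0b1010 ^ 0b1000 = 0b0010.  Equivalently,
 * pmask == portvec & -portvec.
 */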
#endif

enum {
        MEMWIN0_APERTURE = 65536,
        MEMWIN0_BASE     = 0x30000,
        MEMWIN1_APERTURE = 32768,
        MEMWIN1_BASE     = 0x28000,
        MEMWIN2_APERTURE = 2048,
        MEMWIN2_BASE     = 0x1b800,
};

enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES     = 16384,
        MAX_RX_BUFFERS       = 16384,
        MIN_TXQ_ENTRIES      = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES     = 128,
        MIN_FL_ENTRIES       = 16
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0xa000, 0),  /* PE10K */
        CH_DEVICE(0x4001, -1),
        CH_DEVICE(0x4002, -1),
        CH_DEVICE(0x4003, -1),
        CH_DEVICE(0x4004, -1),
        CH_DEVICE(0x4005, -1),
        CH_DEVICE(0x4006, -1),
        CH_DEVICE(0x4007, -1),
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
        CH_DEVICE(0x4404, 4),
        CH_DEVICE(0x4405, 4),
        CH_DEVICE(0x4406, 4),
        CH_DEVICE(0x4407, 4),
        CH_DEVICE(0x4408, 4),
        CH_DEVICE(0x4409, 4),
        CH_DEVICE(0x440a, 4),
        { 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");

static int vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

static unsigned int num_vf[4];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}

/**
 * link_start - enable a port
 * @dev: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0)
                ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
        return ret;
}

/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */
        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

                if (p->type == 0)
                        t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
        return 0;
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD.  All processing is done by
 * the ULD; we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
        if (v & PFSW) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        return 0;

unwind:
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
}

/**
 * write_rss - write the RSS table for a given port
 * @pi: the port
 * @queues: array of queue indices for RSS
 *
 * Sets up the portion of the HW RSS table for the port's VI to distribute
 * packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;

        err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
}
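
/*
 * Illustration (a sketch, not driver code): for a port whose VI has
 * rss_size = 4 and queues = {0, 1, 2, 3}, rss[] ends up holding the
 * absolute response-queue IDs of the port's first four Ethernet Rx queues,
 * so the hardware spreads hashed flows across exactly those queues.
 */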

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                err = write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}

/**
 * setup_sge_queues - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}

/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if the flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
        int ret;
        u32 vers;
        const struct fw_hdr *hdr;
        const struct firmware *fw;
        struct device *dev = adap->pdev_dev;

        ret = request_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "unable to load firmware image " FW_FNAME
                        ", error %d\n", ret);
                return ret;
        }

        hdr = (const struct fw_hdr *)fw->data;
        vers = ntohl(hdr->fw_ver);
        if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
                ret = -EINVAL;              /* wrong major version, won't do */
                goto out;
        }

        /*
         * If the flash FW is unusable or we found something newer, load it.
         */
        if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
            vers > adap->params.fw_vers) {
                ret = -t4_load_fw(adap, fw->data, fw->size);
                if (!ret)
                        dev_info(dev, "firmware upgraded to version %pI4 from "
                                 FW_FNAME "\n", &hdr->fw_ver);
        }
out:    release_firmware(fw);
        return ret;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}

static inline int is_offload(const struct adapter *adap)
{
        return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
        return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK ",
        "TxFramesOK ",
        "TxBroadcastFrames ",
        "TxMulticastFrames ",
        "TxUnicastFrames ",
        "TxErrorFrames ",

        "TxFrames64 ",
        "TxFrames65To127 ",
        "TxFrames128To255 ",
        "TxFrames256To511 ",
        "TxFrames512To1023 ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax ",

        "TxFramesDropped ",
        "TxPauseFrames ",
        "TxPPP0Frames ",
        "TxPPP1Frames ",
        "TxPPP2Frames ",
        "TxPPP3Frames ",
        "TxPPP4Frames ",
        "TxPPP5Frames ",
        "TxPPP6Frames ",
        "TxPPP7Frames ",

        "RxOctetsOK ",
        "RxFramesOK ",
        "RxBroadcastFrames ",
        "RxMulticastFrames ",
        "RxUnicastFrames ",

        "RxFramesTooLong ",
        "RxJabberErrors ",
        "RxFCSErrors ",
        "RxLengthErrors ",
        "RxSymbolErrors ",
        "RxRuntFrames ",

        "RxFrames64 ",
        "RxFrames65To127 ",
        "RxFrames128To255 ",
        "RxFrames256To511 ",
        "RxFrames512To1023 ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax ",

        "RxPauseFrames ",
        "RxPPP0Frames ",
        "RxPPP1Frames ",
        "RxPPP2Frames ",
        "RxPPP3Frames ",
        "RxPPP4Frames ",
        "RxPPP5Frames ",
        "RxPPP6Frames ",
        "RxPPP7Frames ",

        "RxBG0FramesDropped ",
        "RxBG1FramesDropped ",
        "RxBG2FramesDropped ",
        "RxBG3FramesDropped ",
        "RxBG0FramesTrunc ",
        "RxBG1FramesTrunc ",
        "RxBG2FramesTrunc ",
        "RxBG3FramesTrunc ",

        "TSO ",
        "TxCsumOffload ",
        "RxCsumGood ",
        "VLANextractions ",
        "VLANinsertions ",
        "GROpackets ",
        "GROmerged ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T4_REGMAP_SIZE (160 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T4_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = netdev2adap(dev);

        strcpy(info->driver, KBUILD_MODNAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));

        if (!adapter->params.fw_vers)
                strcpy(info->fw_version, "N/A");
        else
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%u.%u.%u.%u, TP %u.%u.%u.%u",
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * Port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
        u64 tso;
        u64 tx_csum;
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
        u64 gro_pkts;
        u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
                const struct port_info *p, struct queue_port_stats *s)
{
        int i;
        const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
        const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

        memset(s, 0, sizeof(*s));
        for (i = 0; i < p->nqsets; i++, rx++, tx++) {
                s->tso += tx->tso;
                s->tx_csum += tx->tx_cso;
                s->rx_csum += rx->stats.rx_cso;
                s->vlan_ex += rx->stats.vlan_ex;
                s->vlan_ins += tx->vlan_ins;
                s->gro_pkts += rx->stats.lro_pkts;
                s->gro_merged += rx->stats.lro_merged;
        }
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

        data += sizeof(struct port_stats) / sizeof(u64);
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
        return 4 | (ap->params.rev << 10) | (1 << 16);
}
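
/*
 * Worked example of the encoding above (illustrative only): a rev-1 chip
 * yields 4 | (1 << 10) | (1 << 16) = 0x10404, i.e. chip version 4, chip
 * revision 1, register dump version 1.
 */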
1084
1085static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1086 unsigned int end)
1087{
1088 u32 *p = buf + start;
1089
1090 for ( ; start <= end; start += sizeof(u32))
1091 *p++ = t4_read_reg(ap, start);
1092}
1093
1094static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1095 void *buf)
1096{
1097 static const unsigned int reg_ranges[] = {
1098 0x1008, 0x1108,
1099 0x1180, 0x11b4,
1100 0x11fc, 0x123c,
1101 0x1300, 0x173c,
1102 0x1800, 0x18fc,
1103 0x3000, 0x30d8,
1104 0x30e0, 0x5924,
1105 0x5960, 0x59d4,
1106 0x5a00, 0x5af8,
1107 0x6000, 0x6098,
1108 0x6100, 0x6150,
1109 0x6200, 0x6208,
1110 0x6240, 0x6248,
1111 0x6280, 0x6338,
1112 0x6370, 0x638c,
1113 0x6400, 0x643c,
1114 0x6500, 0x6524,
1115 0x6a00, 0x6a38,
1116 0x6a60, 0x6a78,
1117 0x6b00, 0x6b84,
1118 0x6bf0, 0x6c84,
1119 0x6cf0, 0x6d84,
1120 0x6df0, 0x6e84,
1121 0x6ef0, 0x6f84,
1122 0x6ff0, 0x7084,
1123 0x70f0, 0x7184,
1124 0x71f0, 0x7284,
1125 0x72f0, 0x7384,
1126 0x73f0, 0x7450,
1127 0x7500, 0x7530,
1128 0x7600, 0x761c,
1129 0x7680, 0x76cc,
1130 0x7700, 0x7798,
1131 0x77c0, 0x77fc,
1132 0x7900, 0x79fc,
1133 0x7b00, 0x7c38,
1134 0x7d00, 0x7efc,
1135 0x8dc0, 0x8e1c,
1136 0x8e30, 0x8e78,
1137 0x8ea0, 0x8f6c,
1138 0x8fc0, 0x9074,
1139 0x90fc, 0x90fc,
1140 0x9400, 0x9458,
1141 0x9600, 0x96bc,
1142 0x9800, 0x9808,
1143 0x9820, 0x983c,
1144 0x9850, 0x9864,
1145 0x9c00, 0x9c6c,
1146 0x9c80, 0x9cec,
1147 0x9d00, 0x9d6c,
1148 0x9d80, 0x9dec,
1149 0x9e00, 0x9e6c,
1150 0x9e80, 0x9eec,
1151 0x9f00, 0x9f6c,
1152 0x9f80, 0x9fec,
1153 0xd004, 0xd03c,
1154 0xdfc0, 0xdfe0,
1155 0xe000, 0xea7c,
1156 0xf000, 0x11190,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001157 0x19040, 0x1906c,
1158 0x19078, 0x19080,
1159 0x1908c, 0x19124,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001160 0x19150, 0x191b0,
1161 0x191d0, 0x191e8,
1162 0x19238, 0x1924c,
1163 0x193f8, 0x19474,
1164 0x19490, 0x194f8,
1165 0x19800, 0x19f30,
1166 0x1a000, 0x1a06c,
1167 0x1a0b0, 0x1a120,
1168 0x1a128, 0x1a138,
1169 0x1a190, 0x1a1c4,
1170 0x1a1fc, 0x1a1fc,
1171 0x1e040, 0x1e04c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001172 0x1e284, 0x1e28c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001173 0x1e2c0, 0x1e2c0,
1174 0x1e2e0, 0x1e2e0,
1175 0x1e300, 0x1e384,
1176 0x1e3c0, 0x1e3c8,
1177 0x1e440, 0x1e44c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001178 0x1e684, 0x1e68c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001179 0x1e6c0, 0x1e6c0,
1180 0x1e6e0, 0x1e6e0,
1181 0x1e700, 0x1e784,
1182 0x1e7c0, 0x1e7c8,
1183 0x1e840, 0x1e84c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001184 0x1ea84, 0x1ea8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001185 0x1eac0, 0x1eac0,
1186 0x1eae0, 0x1eae0,
1187 0x1eb00, 0x1eb84,
1188 0x1ebc0, 0x1ebc8,
1189 0x1ec40, 0x1ec4c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001190 0x1ee84, 0x1ee8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001191 0x1eec0, 0x1eec0,
1192 0x1eee0, 0x1eee0,
1193 0x1ef00, 0x1ef84,
1194 0x1efc0, 0x1efc8,
1195 0x1f040, 0x1f04c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001196 0x1f284, 0x1f28c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001197 0x1f2c0, 0x1f2c0,
1198 0x1f2e0, 0x1f2e0,
1199 0x1f300, 0x1f384,
1200 0x1f3c0, 0x1f3c8,
1201 0x1f440, 0x1f44c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001202 0x1f684, 0x1f68c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001203 0x1f6c0, 0x1f6c0,
1204 0x1f6e0, 0x1f6e0,
1205 0x1f700, 0x1f784,
1206 0x1f7c0, 0x1f7c8,
1207 0x1f840, 0x1f84c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001208 0x1fa84, 0x1fa8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001209 0x1fac0, 0x1fac0,
1210 0x1fae0, 0x1fae0,
1211 0x1fb00, 0x1fb84,
1212 0x1fbc0, 0x1fbc8,
1213 0x1fc40, 0x1fc4c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001214 0x1fe84, 0x1fe8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001215 0x1fec0, 0x1fec0,
1216 0x1fee0, 0x1fee0,
1217 0x1ff00, 0x1ff84,
1218 0x1ffc0, 0x1ffc8,
1219 0x20000, 0x2002c,
1220 0x20100, 0x2013c,
1221 0x20190, 0x201c8,
1222 0x20200, 0x20318,
1223 0x20400, 0x20528,
1224 0x20540, 0x20614,
1225 0x21000, 0x21040,
1226 0x2104c, 0x21060,
1227 0x210c0, 0x210ec,
1228 0x21200, 0x21268,
1229 0x21270, 0x21284,
1230 0x212fc, 0x21388,
1231 0x21400, 0x21404,
1232 0x21500, 0x21518,
1233 0x2152c, 0x2153c,
1234 0x21550, 0x21554,
1235 0x21600, 0x21600,
1236 0x21608, 0x21628,
1237 0x21630, 0x2163c,
1238 0x21700, 0x2171c,
1239 0x21780, 0x2178c,
1240 0x21800, 0x21c38,
1241 0x21c80, 0x21d7c,
1242 0x21e00, 0x21e04,
1243 0x22000, 0x2202c,
1244 0x22100, 0x2213c,
1245 0x22190, 0x221c8,
1246 0x22200, 0x22318,
1247 0x22400, 0x22528,
1248 0x22540, 0x22614,
1249 0x23000, 0x23040,
1250 0x2304c, 0x23060,
1251 0x230c0, 0x230ec,
1252 0x23200, 0x23268,
1253 0x23270, 0x23284,
1254 0x232fc, 0x23388,
1255 0x23400, 0x23404,
1256 0x23500, 0x23518,
1257 0x2352c, 0x2353c,
1258 0x23550, 0x23554,
1259 0x23600, 0x23600,
1260 0x23608, 0x23628,
1261 0x23630, 0x2363c,
1262 0x23700, 0x2371c,
1263 0x23780, 0x2378c,
1264 0x23800, 0x23c38,
1265 0x23c80, 0x23d7c,
1266 0x23e00, 0x23e04,
1267 0x24000, 0x2402c,
1268 0x24100, 0x2413c,
1269 0x24190, 0x241c8,
1270 0x24200, 0x24318,
1271 0x24400, 0x24528,
1272 0x24540, 0x24614,
1273 0x25000, 0x25040,
1274 0x2504c, 0x25060,
1275 0x250c0, 0x250ec,
1276 0x25200, 0x25268,
1277 0x25270, 0x25284,
1278 0x252fc, 0x25388,
1279 0x25400, 0x25404,
1280 0x25500, 0x25518,
1281 0x2552c, 0x2553c,
1282 0x25550, 0x25554,
1283 0x25600, 0x25600,
1284 0x25608, 0x25628,
1285 0x25630, 0x2563c,
1286 0x25700, 0x2571c,
1287 0x25780, 0x2578c,
1288 0x25800, 0x25c38,
1289 0x25c80, 0x25d7c,
1290 0x25e00, 0x25e04,
1291 0x26000, 0x2602c,
1292 0x26100, 0x2613c,
1293 0x26190, 0x261c8,
1294 0x26200, 0x26318,
1295 0x26400, 0x26528,
1296 0x26540, 0x26614,
1297 0x27000, 0x27040,
1298 0x2704c, 0x27060,
1299 0x270c0, 0x270ec,
1300 0x27200, 0x27268,
1301 0x27270, 0x27284,
1302 0x272fc, 0x27388,
1303 0x27400, 0x27404,
1304 0x27500, 0x27518,
1305 0x2752c, 0x2753c,
1306 0x27550, 0x27554,
1307 0x27600, 0x27600,
1308 0x27608, 0x27628,
1309 0x27630, 0x2763c,
1310 0x27700, 0x2771c,
1311 0x27780, 0x2778c,
1312 0x27800, 0x27c38,
1313 0x27c80, 0x27d7c,
1314 0x27e00, 0x27e04
1315 };
1316
1317 int i;
1318 struct adapter *ap = netdev2adap(dev);
1319
1320 regs->version = mk_adap_vers(ap);
1321
1322 memset(buf, 0, T4_REGMAP_SIZE);
1323 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
1324 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1325}
1326
1327static int restart_autoneg(struct net_device *dev)
1328{
1329 struct port_info *p = netdev_priv(dev);
1330
1331 if (!netif_running(dev))
1332 return -EAGAIN;
1333 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1334 return -EINVAL;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00001335 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001336 return 0;
1337}
1338
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07001339static int identify_port(struct net_device *dev,
1340 enum ethtool_phys_id_state state)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001341{
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07001342 unsigned int val;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00001343 struct adapter *adap = netdev2adap(dev);
1344
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07001345 if (state == ETHTOOL_ID_ACTIVE)
1346 val = 0xffff;
1347 else if (state == ETHTOOL_ID_INACTIVE)
1348 val = 0;
1349 else
1350 return -EINVAL;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001351
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07001352 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001353}
1354
1355static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1356{
1357 unsigned int v = 0;
1358
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00001359 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1360 type == FW_PORT_TYPE_BT_XAUI) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001361 v |= SUPPORTED_TP;
1362 if (caps & FW_PORT_CAP_SPEED_100M)
1363 v |= SUPPORTED_100baseT_Full;
1364 if (caps & FW_PORT_CAP_SPEED_1G)
1365 v |= SUPPORTED_1000baseT_Full;
1366 if (caps & FW_PORT_CAP_SPEED_10G)
1367 v |= SUPPORTED_10000baseT_Full;
1368 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1369 v |= SUPPORTED_Backplane;
1370 if (caps & FW_PORT_CAP_SPEED_1G)
1371 v |= SUPPORTED_1000baseKX_Full;
1372 if (caps & FW_PORT_CAP_SPEED_10G)
1373 v |= SUPPORTED_10000baseKX4_Full;
1374 } else if (type == FW_PORT_TYPE_KR)
1375 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00001376 else if (type == FW_PORT_TYPE_BP_AP)
Dimitris Michailidis7d5e77a2010-12-14 21:36:47 +00001377 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1378 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
1379 else if (type == FW_PORT_TYPE_BP4_AP)
1380 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1381 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
1382 SUPPORTED_10000baseKX4_Full;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00001383 else if (type == FW_PORT_TYPE_FIBER_XFI ||
1384 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001385 v |= SUPPORTED_FIBRE;
1386
1387 if (caps & FW_PORT_CAP_ANEG)
1388 v |= SUPPORTED_Autoneg;
1389 return v;
1390}
1391
1392static unsigned int to_fw_linkcaps(unsigned int caps)
1393{
1394 unsigned int v = 0;
1395
1396 if (caps & ADVERTISED_100baseT_Full)
1397 v |= FW_PORT_CAP_SPEED_100M;
1398 if (caps & ADVERTISED_1000baseT_Full)
1399 v |= FW_PORT_CAP_SPEED_1G;
1400 if (caps & ADVERTISED_10000baseT_Full)
1401 v |= FW_PORT_CAP_SPEED_10G;
1402 return v;
1403}
1404
1405static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1406{
1407 const struct port_info *p = netdev_priv(dev);
1408
1409 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00001410 p->port_type == FW_PORT_TYPE_BT_XFI ||
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001411 p->port_type == FW_PORT_TYPE_BT_XAUI)
1412 cmd->port = PORT_TP;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00001413 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1414 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001415 cmd->port = PORT_FIBRE;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00001416 else if (p->port_type == FW_PORT_TYPE_SFP) {
1417 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1418 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1419 cmd->port = PORT_DA;
1420 else
1421 cmd->port = PORT_FIBRE;
1422 } else
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001423 cmd->port = PORT_OTHER;
1424
1425 if (p->mdio_addr >= 0) {
1426 cmd->phy_address = p->mdio_addr;
1427 cmd->transceiver = XCVR_EXTERNAL;
1428 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1429 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1430 } else {
1431 cmd->phy_address = 0; /* not really, but no better option */
1432 cmd->transceiver = XCVR_INTERNAL;
1433 cmd->mdio_support = 0;
1434 }
1435
1436 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1437 cmd->advertising = from_fw_linkcaps(p->port_type,
1438 p->link_cfg.advertising);
1439 cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
1440 cmd->duplex = DUPLEX_FULL;
1441 cmd->autoneg = p->link_cfg.autoneg;
1442 cmd->maxtxpkt = 0;
1443 cmd->maxrxpkt = 0;
1444 return 0;
1445}
1446
1447static unsigned int speed_to_caps(int speed)
1448{
1449 if (speed == SPEED_100)
1450 return FW_PORT_CAP_SPEED_100M;
1451 if (speed == SPEED_1000)
1452 return FW_PORT_CAP_SPEED_1G;
1453 if (speed == SPEED_10000)
1454 return FW_PORT_CAP_SPEED_10G;
1455 return 0;
1456}
1457
1458static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1459{
1460 unsigned int cap;
1461 struct port_info *p = netdev_priv(dev);
1462 struct link_config *lc = &p->link_cfg;
David Decotigny25db0332011-04-27 18:32:39 +00001463 u32 speed = ethtool_cmd_speed(cmd);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001464
1465 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
1466 return -EINVAL;
1467
1468 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1469 /*
1470 * PHY offers a single speed. See if that's what's
1471 * being requested.
1472 */
1473 if (cmd->autoneg == AUTONEG_DISABLE &&
David Decotigny25db0332011-04-27 18:32:39 +00001474 (lc->supported & speed_to_caps(speed)))
1475 return 0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001476 return -EINVAL;
1477 }
1478
1479 if (cmd->autoneg == AUTONEG_DISABLE) {
David Decotigny25db0332011-04-27 18:32:39 +00001480 cap = speed_to_caps(speed);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001481
David Decotigny25db0332011-04-27 18:32:39 +00001482 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
1483 (speed == SPEED_10000))
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001484 return -EINVAL;
1485 lc->requested_speed = cap;
1486 lc->advertising = 0;
1487 } else {
1488 cap = to_fw_linkcaps(cmd->advertising);
1489 if (!(lc->supported & cap))
1490 return -EINVAL;
1491 lc->requested_speed = 0;
1492 lc->advertising = cap | FW_PORT_CAP_ANEG;
1493 }
1494 lc->autoneg = cmd->autoneg;
1495
1496 if (netif_running(dev))
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00001497 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1498 lc);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001499 return 0;
1500}
1501
1502static void get_pauseparam(struct net_device *dev,
1503 struct ethtool_pauseparam *epause)
1504{
1505 struct port_info *p = netdev_priv(dev);
1506
1507 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1508 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1509 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1510}
1511
1512static int set_pauseparam(struct net_device *dev,
1513 struct ethtool_pauseparam *epause)
1514{
1515 struct port_info *p = netdev_priv(dev);
1516 struct link_config *lc = &p->link_cfg;
1517
1518 if (epause->autoneg == AUTONEG_DISABLE)
1519 lc->requested_fc = 0;
1520 else if (lc->supported & FW_PORT_CAP_ANEG)
1521 lc->requested_fc = PAUSE_AUTONEG;
1522 else
1523 return -EINVAL;
1524
1525 if (epause->rx_pause)
1526 lc->requested_fc |= PAUSE_RX;
1527 if (epause->tx_pause)
1528 lc->requested_fc |= PAUSE_TX;
1529 if (netif_running(dev))
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00001530 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1531 lc);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001532 return 0;
1533}
1534
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001535static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1536{
1537 const struct port_info *pi = netdev_priv(dev);
1538 const struct sge *s = &pi->adapter->sge;
1539
1540 e->rx_max_pending = MAX_RX_BUFFERS;
1541 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1542 e->rx_jumbo_max_pending = 0;
1543 e->tx_max_pending = MAX_TXQ_ENTRIES;
1544
1545 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1546 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1547 e->rx_jumbo_pending = 0;
1548 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1549}
1550
1551static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1552{
1553 int i;
1554 const struct port_info *pi = netdev_priv(dev);
1555 struct adapter *adapter = pi->adapter;
1556 struct sge *s = &adapter->sge;
1557
1558 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1559 e->tx_pending > MAX_TXQ_ENTRIES ||
1560 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1561 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1562 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1563 return -EINVAL;
1564
1565 if (adapter->flags & FULL_INIT_DONE)
1566 return -EBUSY;
1567
1568 for (i = 0; i < pi->nqsets; ++i) {
1569 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1570 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1571 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1572 }
1573 return 0;
1574}
1575
1576static int closest_timer(const struct sge *s, int time)
1577{
1578 int i, delta, match = 0, min_delta = INT_MAX;
1579
1580 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1581 delta = time - s->timer_val[i];
1582 if (delta < 0)
1583 delta = -delta;
1584 if (delta < min_delta) {
1585 min_delta = delta;
1586 match = i;
1587 }
1588 }
1589 return match;
1590}
1591
1592static int closest_thres(const struct sge *s, int thres)
1593{
1594 int i, delta, match = 0, min_delta = INT_MAX;
1595
1596 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1597 delta = thres - s->counter_val[i];
1598 if (delta < 0)
1599 delta = -delta;
1600 if (delta < min_delta) {
1601 min_delta = delta;
1602 match = i;
1603 }
1604 }
1605 return match;
1606}
1607
1608/*
1609 * Return a queue's interrupt hold-off time in us. 0 means no timer.
1610 */
1611static unsigned int qtimer_val(const struct adapter *adap,
1612 const struct sge_rspq *q)
1613{
1614 unsigned int idx = q->intr_params >> 1;
1615
1616 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1617}
1618
1619/**
1620 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1621 * @adap: the adapter
1622 * @q: the Rx queue
1623 * @us: the hold-off time in us, or 0 to disable timer
1624 * @cnt: the hold-off packet count, or 0 to disable counter
1625 *
1626 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1627 * one of the two needs to be enabled for the queue to generate interrupts.
1628 */
1629static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1630 unsigned int us, unsigned int cnt)
1631{
1632 if ((us | cnt) == 0)
1633 cnt = 1;
1634
1635 if (cnt) {
1636 int err;
1637 u32 v, new_idx;
1638
1639 new_idx = closest_thres(&adap->sge, cnt);
1640 if (q->desc && q->pktcnt_idx != new_idx) {
1641 /* the queue has already been created, update it */
1642 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1643 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1644 FW_PARAMS_PARAM_YZ(q->cntxt_id);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00001645 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
1646 &new_idx);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001647 if (err)
1648 return err;
1649 }
1650 q->pktcnt_idx = new_idx;
1651 }
1652
1653 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1654 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1655 return 0;
1656}
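
/*
 * Example (illustrative only): set_rxq_intr_params(adap, q, 50, 8) selects
 * the configured hold-off timer closest to 50 us and the packet-count
 * threshold closest to 8, so the queue raises an interrupt once 8 packets
 * are pending or the timer expires, whichever happens first.  Passing
 * us = 0 and cnt = 0 is coerced to cnt = 1, i.e. an interrupt per packet.
 */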

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
                        c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct adapter *adap = pi->adapter;
        const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

        c->rx_coalesce_usecs = qtimer_val(adap, rq);
        c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
                adap->sge.counter_val[rq->pktcnt_idx] : 0;
        return 0;
}

/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual.  The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
        fn *= sz;
        if (phys_addr < 1024)
                return phys_addr + (31 << 10);
        if (phys_addr < 1024 + fn)
                return 31744 - fn + phys_addr - 1024;
        if (phys_addr < EEPROMSIZE)
                return phys_addr - 1024 - fn;
        return -EINVAL;
}
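
/*
 * Worked example of the mapping above (illustrative, assuming a 1K
 * function-specific area, i.e. sz = 1024, and fn = 2, so A = 2K):
 * physical 0x0 -> virtual 31K, physical 1K -> virtual 29K (the bottom of
 * [31K-A..31K)), and physical 1K+A = 3K -> virtual 0.
 */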
1707
1708/*
1709 * The next two routines implement eeprom read/write from physical addresses.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001710 */
1711static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1712{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00001713 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001714
1715 if (vaddr >= 0)
1716 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1717 return vaddr < 0 ? vaddr : 0;
1718}
1719
1720static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1721{
1722	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1723
1724 if (vaddr >= 0)
1725 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1726 return vaddr < 0 ? vaddr : 0;
1727}
1728
1729#define EEPROM_MAGIC 0x38E2F10C
1730
1731static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1732 u8 *data)
1733{
1734 int i, err = 0;
1735 struct adapter *adapter = netdev2adap(dev);
1736
1737 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1738 if (!buf)
1739 return -ENOMEM;
1740
1741 e->magic = EEPROM_MAGIC;
1742 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1743 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1744
1745 if (!err)
1746 memcpy(data, buf + e->offset, e->len);
1747 kfree(buf);
1748 return err;
1749}
1750
1751static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1752 u8 *data)
1753{
1754 u8 *buf;
1755 int err = 0;
1756 u32 aligned_offset, aligned_len, *p;
1757 struct adapter *adapter = netdev2adap(dev);
1758
1759 if (eeprom->magic != EEPROM_MAGIC)
1760 return -EINVAL;
1761
1762 aligned_offset = eeprom->offset & ~3;
1763 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1764
1765	if (adapter->fn > 0) {
1766 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
1767
1768 if (aligned_offset < start ||
1769 aligned_offset + aligned_len > start + EEPROMPFSIZE)
1770 return -EPERM;
1771 }
1772
1773	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1774 /*
1775 * RMW possibly needed for first or last words.
1776 */
1777 buf = kmalloc(aligned_len, GFP_KERNEL);
1778 if (!buf)
1779 return -ENOMEM;
1780 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1781 if (!err && aligned_len > 4)
1782 err = eeprom_rd_phys(adapter,
1783 aligned_offset + aligned_len - 4,
1784 (u32 *)&buf[aligned_len - 4]);
1785 if (err)
1786 goto out;
1787 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1788 } else
1789 buf = data;
1790
1791 err = t4_seeprom_wp(adapter, false);
1792 if (err)
1793 goto out;
1794
1795 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1796 err = eeprom_wr_phys(adapter, aligned_offset, *p);
1797 aligned_offset += 4;
1798 }
1799
1800 if (!err)
1801 err = t4_seeprom_wp(adapter, true);
1802out:
1803 if (buf != data)
1804 kfree(buf);
1805 return err;
1806}
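/*
 * Illustrative sketch (not built; userspace demo): the word-alignment
 * arithmetic used by set_eeprom() above. An unaligned 5-byte write at
 * offset 3 grows to an 8-byte aligned span, which is why the first and
 * last words may need the read-modify-write handling.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int offset = 3, len = 5;
	unsigned int aligned_offset = offset & ~3;
	unsigned int aligned_len = (len + (offset & 3) + 3) & ~3;

	/* prints "aligned_offset=0 aligned_len=8" */
	printf("aligned_offset=%u aligned_len=%u\n",
	       aligned_offset, aligned_len);
	return 0;
}
#endif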
1807
1808static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1809{
1810 int ret;
1811 const struct firmware *fw;
1812 struct adapter *adap = netdev2adap(netdev);
1813
1814 ef->data[sizeof(ef->data) - 1] = '\0';
1815 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1816 if (ret < 0)
1817 return ret;
1818
1819 ret = t4_load_fw(adap, fw->data, fw->size);
1820 release_firmware(fw);
1821 if (!ret)
1822 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
1823 return ret;
1824}
1825
1826#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1827#define BCAST_CRC 0xa0ccc1a6
1828
1829static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1830{
1831 wol->supported = WAKE_BCAST | WAKE_MAGIC;
1832 wol->wolopts = netdev2adap(dev)->wol;
1833 memset(&wol->sopass, 0, sizeof(wol->sopass));
1834}
1835
1836static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1837{
1838 int err = 0;
1839 struct port_info *pi = netdev_priv(dev);
1840
1841 if (wol->wolopts & ~WOL_SUPPORTED)
1842 return -EINVAL;
1843 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
1844 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
1845 if (wol->wolopts & WAKE_BCAST) {
1846 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
1847 ~0ULL, 0, false);
1848 if (!err)
1849 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
1850 ~6ULL, ~0ULL, BCAST_CRC, true);
1851 } else
1852 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
1853 return err;
1854}
1855
1856static int cxgb_set_features(struct net_device *dev, u32 features)
1857{
1858	const struct port_info *pi = netdev_priv(dev);
1859	u32 changed = dev->features ^ features;
1860	int err;
1861
1862	if (!(changed & NETIF_F_HW_VLAN_RX))
1863		return 0;
1864
1865	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
1866			    -1, -1, -1,
1867			    !!(features & NETIF_F_HW_VLAN_RX), true);
1868	if (unlikely(err))
1869		dev->features = features ^ NETIF_F_HW_VLAN_RX;
1870	return err;
1871}
1872
1873static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
1874{
1875 const struct port_info *pi = netdev_priv(dev);
1876 unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
1877
1878 p->size = pi->rss_size;
1879 while (n--)
1880 p->ring_index[n] = pi->rss[n];
1881 return 0;
1882}
1883
1884static int set_rss_table(struct net_device *dev,
1885 const struct ethtool_rxfh_indir *p)
1886{
1887 unsigned int i;
1888 struct port_info *pi = netdev_priv(dev);
1889
1890 if (p->size != pi->rss_size)
1891 return -EINVAL;
1892 for (i = 0; i < p->size; i++)
1893 if (p->ring_index[i] >= pi->nqsets)
1894 return -EINVAL;
1895 for (i = 0; i < p->size; i++)
1896 pi->rss[i] = p->ring_index[i];
1897 if (pi->adapter->flags & FULL_INIT_DONE)
1898 return write_rss(pi, pi->rss);
1899 return 0;
1900}
1901
1902static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1903 void *rules)
1904{
1905	const struct port_info *pi = netdev_priv(dev);
1906
1907	switch (info->cmd) {
1908	case ETHTOOL_GRXFH: {
1909 unsigned int v = pi->rss_mode;
1910
1911 info->data = 0;
1912 switch (info->flow_type) {
1913 case TCP_V4_FLOW:
1914 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
1915 info->data = RXH_IP_SRC | RXH_IP_DST |
1916 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1917 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1918 info->data = RXH_IP_SRC | RXH_IP_DST;
1919 break;
1920 case UDP_V4_FLOW:
1921 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
1922 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1923 info->data = RXH_IP_SRC | RXH_IP_DST |
1924 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1925 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1926 info->data = RXH_IP_SRC | RXH_IP_DST;
1927 break;
1928 case SCTP_V4_FLOW:
1929 case AH_ESP_V4_FLOW:
1930 case IPV4_FLOW:
1931 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1932 info->data = RXH_IP_SRC | RXH_IP_DST;
1933 break;
1934 case TCP_V6_FLOW:
1935 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
1936 info->data = RXH_IP_SRC | RXH_IP_DST |
1937 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1938 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1939 info->data = RXH_IP_SRC | RXH_IP_DST;
1940 break;
1941 case UDP_V6_FLOW:
1942 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
1943 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1944 info->data = RXH_IP_SRC | RXH_IP_DST |
1945 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1946 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1947 info->data = RXH_IP_SRC | RXH_IP_DST;
1948 break;
1949 case SCTP_V6_FLOW:
1950 case AH_ESP_V6_FLOW:
1951 case IPV6_FLOW:
1952 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1953 info->data = RXH_IP_SRC | RXH_IP_DST;
1954 break;
1955 }
1956 return 0;
1957 }
1958	case ETHTOOL_GRXRINGS:
1959		info->data = pi->nqsets;
1960		return 0;
1961 }
1962 return -EOPNOTSUPP;
1963}
1964
1965static struct ethtool_ops cxgb_ethtool_ops = {
1966 .get_settings = get_settings,
1967 .set_settings = set_settings,
1968 .get_drvinfo = get_drvinfo,
1969 .get_msglevel = get_msglevel,
1970 .set_msglevel = set_msglevel,
1971 .get_ringparam = get_sge_param,
1972 .set_ringparam = set_sge_param,
1973 .get_coalesce = get_coalesce,
1974 .set_coalesce = set_coalesce,
1975 .get_eeprom_len = get_eeprom_len,
1976 .get_eeprom = get_eeprom,
1977 .set_eeprom = set_eeprom,
1978 .get_pauseparam = get_pauseparam,
1979 .set_pauseparam = set_pauseparam,
1980	.get_link = ethtool_op_get_link,
1981	.get_strings = get_strings,
1982	.set_phys_id = identify_port,
1983	.nway_reset = restart_autoneg,
1984 .get_sset_count = get_sset_count,
1985 .get_ethtool_stats = get_stats,
1986 .get_regs_len = get_regs_len,
1987 .get_regs = get_regs,
1988 .get_wol = get_wol,
1989 .set_wol = set_wol,
1990	.get_rxnfc = get_rxnfc,
1991	.get_rxfh_indir = get_rss_table,
1992	.set_rxfh_indir = set_rss_table,
1993	.flash_device = set_flash,
1994};
1995
1996/*
1997 * debugfs support
1998 */
1999
2000static int mem_open(struct inode *inode, struct file *file)
2001{
2002 file->private_data = inode->i_private;
2003 return 0;
2004}
2005
2006static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2007 loff_t *ppos)
2008{
2009 loff_t pos = *ppos;
2010 loff_t avail = file->f_path.dentry->d_inode->i_size;
2011 unsigned int mem = (uintptr_t)file->private_data & 3;
2012 struct adapter *adap = file->private_data - mem;
2013
2014 if (pos < 0)
2015 return -EINVAL;
2016 if (pos >= avail)
2017 return 0;
2018 if (count > avail - pos)
2019 count = avail - pos;
2020
2021 while (count) {
2022 size_t len;
2023 int ret, ofst;
2024 __be32 data[16];
2025
2026 if (mem == MEM_MC)
2027 ret = t4_mc_read(adap, pos, data, NULL);
2028 else
2029 ret = t4_edc_read(adap, mem, pos, data, NULL);
2030 if (ret)
2031 return ret;
2032
2033 ofst = pos % sizeof(data);
2034 len = min(count, sizeof(data) - ofst);
2035 if (copy_to_user(buf, (u8 *)data + ofst, len))
2036 return -EFAULT;
2037
2038 buf += len;
2039 pos += len;
2040 count -= len;
2041 }
2042 count = pos - *ppos;
2043 *ppos = pos;
2044 return count;
2045}
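/*
 * Illustrative sketch (not built; userspace demo): the chunking logic of
 * mem_read() above, assuming t4_mc_read()/t4_edc_read() return the aligned
 * 64-byte block containing pos, from which only the requested bytes are
 * copied out. The sizes below are picked just to show the arithmetic.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long pos = 100, count = 200;
	const unsigned long blk = 64;		/* sizeof(data) in mem_read() */

	while (count) {
		unsigned long ofst = pos % blk;	/* offset within the block */
		unsigned long len = blk - ofst;

		if (len > count)
			len = count;
		printf("read block at %lu, copy %lu bytes from +%lu\n",
		       pos - ofst, len, ofst);
		pos += len;
		count -= len;
	}
	return 0;
}
#endif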
2046
2047static const struct file_operations mem_debugfs_fops = {
2048 .owner = THIS_MODULE,
2049 .open = mem_open,
2050 .read = mem_read,
2051	.llseek  = default_llseek,
2052};
2053
2054static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
2055 unsigned int idx, unsigned int size_mb)
2056{
2057 struct dentry *de;
2058
2059 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2060 (void *)adap + idx, &mem_debugfs_fops);
2061 if (de && de->d_inode)
2062 de->d_inode->i_size = size_mb << 20;
2063}
2064
2065static int __devinit setup_debugfs(struct adapter *adap)
2066{
2067 int i;
2068
2069 if (IS_ERR_OR_NULL(adap->debugfs_root))
2070 return -1;
2071
2072 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2073 if (i & EDRAM0_ENABLE)
2074 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2075 if (i & EDRAM1_ENABLE)
2076 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2077 if (i & EXT_MEM_ENABLE)
2078 add_debugfs_mem(adap, "mc", MEM_MC,
2079 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2080 if (adap->l2t)
2081 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2082 &t4_l2t_fops);
2083 return 0;
2084}
2085
2086/*
2087 * upper-layer driver support
2088 */
2089
2090/*
2091 * Allocate an active-open TID and set it to the supplied value.
2092 */
2093int cxgb4_alloc_atid(struct tid_info *t, void *data)
2094{
2095 int atid = -1;
2096
2097 spin_lock_bh(&t->atid_lock);
2098 if (t->afree) {
2099 union aopen_entry *p = t->afree;
2100
2101 atid = p - t->atid_tab;
2102 t->afree = p->next;
2103 p->data = data;
2104 t->atids_in_use++;
2105 }
2106 spin_unlock_bh(&t->atid_lock);
2107 return atid;
2108}
2109EXPORT_SYMBOL(cxgb4_alloc_atid);
2110
2111/*
2112 * Release an active-open TID.
2113 */
2114void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2115{
2116 union aopen_entry *p = &t->atid_tab[atid];
2117
2118 spin_lock_bh(&t->atid_lock);
2119 p->next = t->afree;
2120 t->afree = p;
2121 t->atids_in_use--;
2122 spin_unlock_bh(&t->atid_lock);
2123}
2124EXPORT_SYMBOL(cxgb4_free_atid);
2125
2126/*
2127 * Allocate a server TID and set it to the supplied value.
2128 */
2129int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2130{
2131 int stid;
2132
2133 spin_lock_bh(&t->stid_lock);
2134 if (family == PF_INET) {
2135 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2136 if (stid < t->nstids)
2137 __set_bit(stid, t->stid_bmap);
2138 else
2139 stid = -1;
2140 } else {
2141 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2142 if (stid < 0)
2143 stid = -1;
2144 }
2145 if (stid >= 0) {
2146 t->stid_tab[stid].data = data;
2147 stid += t->stid_base;
2148 t->stids_in_use++;
2149 }
2150 spin_unlock_bh(&t->stid_lock);
2151 return stid;
2152}
2153EXPORT_SYMBOL(cxgb4_alloc_stid);
2154
2155/*
2156 * Release a server TID.
2157 */
2158void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2159{
2160 stid -= t->stid_base;
2161 spin_lock_bh(&t->stid_lock);
2162 if (family == PF_INET)
2163 __clear_bit(stid, t->stid_bmap);
2164 else
2165 bitmap_release_region(t->stid_bmap, stid, 2);
2166 t->stid_tab[stid].data = NULL;
2167 t->stids_in_use--;
2168 spin_unlock_bh(&t->stid_lock);
2169}
2170EXPORT_SYMBOL(cxgb4_free_stid);
2171
2172/*
2173 * Populate a TID_RELEASE WR. Caller must properly size the skb.
2174 */
2175static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2176 unsigned int tid)
2177{
2178 struct cpl_tid_release *req;
2179
2180 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
2181 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
2182 INIT_TP_WR(req, tid);
2183 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
2184}
2185
2186/*
2187 * Queue a TID release request and if necessary schedule a work queue to
2188 * process it.
2189 */
2190static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2191				    unsigned int tid)
2192{
2193 void **p = &t->tid_tab[tid];
2194 struct adapter *adap = container_of(t, struct adapter, tids);
2195
2196 spin_lock_bh(&adap->tid_release_lock);
2197 *p = adap->tid_release_head;
2198 /* Low 2 bits encode the Tx channel number */
2199 adap->tid_release_head = (void **)((uintptr_t)p | chan);
2200 if (!adap->tid_release_task_busy) {
2201 adap->tid_release_task_busy = true;
2202 schedule_work(&adap->tid_release_task);
2203 }
2204 spin_unlock_bh(&adap->tid_release_lock);
2205}
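/*
 * Illustrative sketch (not built; userspace demo): the pointer-tagging trick
 * used by the TID release list above. tid_tab entries are pointer-sized and
 * therefore at least 4-byte aligned, so the low 2 bits of an entry's address
 * are free to carry the Tx channel number.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	void *slot[4];				/* stands in for tid_tab */
	unsigned int chan = 2;			/* Tx channel, 0..3 */
	void **p = &slot[1];

	void **head = (void **)((uintptr_t)p | chan);		/* tag */

	/* untag: recover the channel and the real pointer */
	unsigned int got_chan = (uintptr_t)head & 3;
	void **got_p = (void **)((uintptr_t)head - got_chan);

	printf("chan=%u slot=%td\n", got_chan, got_p - slot); /* chan=2 slot=1 */
	return 0;
}
#endif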
2206
2207/*
2208 * Process the list of pending TID release requests.
2209 */
2210static void process_tid_release_list(struct work_struct *work)
2211{
2212 struct sk_buff *skb;
2213 struct adapter *adap;
2214
2215 adap = container_of(work, struct adapter, tid_release_task);
2216
2217 spin_lock_bh(&adap->tid_release_lock);
2218 while (adap->tid_release_head) {
2219 void **p = adap->tid_release_head;
2220 unsigned int chan = (uintptr_t)p & 3;
2221 p = (void *)p - chan;
2222
2223 adap->tid_release_head = *p;
2224 *p = NULL;
2225 spin_unlock_bh(&adap->tid_release_lock);
2226
2227 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2228 GFP_KERNEL)))
2229 schedule_timeout_uninterruptible(1);
2230
2231 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2232 t4_ofld_send(adap, skb);
2233 spin_lock_bh(&adap->tid_release_lock);
2234 }
2235 adap->tid_release_task_busy = false;
2236 spin_unlock_bh(&adap->tid_release_lock);
2237}
2238
2239/*
2240 * Release a TID and inform HW. If we are unable to allocate the release
2241 * message we defer to a work queue.
2242 */
2243void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2244{
2245 void *old;
2246 struct sk_buff *skb;
2247 struct adapter *adap = container_of(t, struct adapter, tids);
2248
2249 old = t->tid_tab[tid];
2250 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2251 if (likely(skb)) {
2252 t->tid_tab[tid] = NULL;
2253 mk_tid_release(skb, chan, tid);
2254 t4_ofld_send(adap, skb);
2255 } else
2256 cxgb4_queue_tid_release(t, chan, tid);
2257 if (old)
2258 atomic_dec(&t->tids_in_use);
2259}
2260EXPORT_SYMBOL(cxgb4_remove_tid);
2261
2262/*
2263 * Allocate and initialize the TID tables. Returns 0 on success.
2264 */
2265static int tid_init(struct tid_info *t)
2266{
2267 size_t size;
2268 unsigned int natids = t->natids;
2269
2270 size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
2271 t->nstids * sizeof(*t->stid_tab) +
2272 BITS_TO_LONGS(t->nstids) * sizeof(long);
2273 t->tid_tab = t4_alloc_mem(size);
2274 if (!t->tid_tab)
2275 return -ENOMEM;
2276
2277 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2278 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2279 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
2280 spin_lock_init(&t->stid_lock);
2281 spin_lock_init(&t->atid_lock);
2282
2283 t->stids_in_use = 0;
2284 t->afree = NULL;
2285 t->atids_in_use = 0;
2286 atomic_set(&t->tids_in_use, 0);
2287
2288	/* Set up the free list for atid_tab and clear the stid bitmap. */
2289 if (natids) {
2290 while (--natids)
2291 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2292 t->afree = t->atid_tab;
2293 }
2294 bitmap_zero(t->stid_bmap, t->nstids);
2295 return 0;
2296}
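/*
 * Illustrative sketch (not built; userspace demo): tid_init() above carves
 * one allocation into four consecutive regions. The counts below are
 * assumptions, and every entry is treated as pointer-sized here purely to
 * show the arithmetic; the real entry types differ.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int ntids = 4096, natids = 2048, nstids = 128;
	unsigned int bits_per_long = 8 * sizeof(long);
	size_t size;

	size = ntids  * sizeof(void *) +	/* tid_tab */
	       natids * sizeof(void *) +	/* atid_tab (union entries) */
	       nstids * sizeof(void *) +	/* stid_tab */
	       ((nstids + bits_per_long - 1) / bits_per_long) * sizeof(long);

	printf("one allocation of %zu bytes\n", size);	/* stid_bmap last */
	return 0;
}
#endif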
2297
2298/**
2299 * cxgb4_create_server - create an IP server
2300 * @dev: the device
2301 * @stid: the server TID
2302 * @sip: local IP address to bind server to
2303 * @sport: the server's TCP port
2304 * @queue: queue to direct messages from this server to
2305 *
2306 * Create an IP server for the given port and address.
2307 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2308 */
2309int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2310 __be32 sip, __be16 sport, unsigned int queue)
2311{
2312 unsigned int chan;
2313 struct sk_buff *skb;
2314 struct adapter *adap;
2315 struct cpl_pass_open_req *req;
2316
2317 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2318 if (!skb)
2319 return -ENOMEM;
2320
2321 adap = netdev2adap(dev);
2322 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2323 INIT_TP_WR(req, 0);
2324 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2325 req->local_port = sport;
2326 req->peer_port = htons(0);
2327 req->local_ip = sip;
2328 req->peer_ip = htonl(0);
2329	chan = rxq_to_chan(&adap->sge, queue);
2330	req->opt0 = cpu_to_be64(TX_CHAN(chan));
2331 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2332 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2333 return t4_mgmt_tx(adap, skb);
2334}
2335EXPORT_SYMBOL(cxgb4_create_server);
2336
2337/**
2338 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2339 * @mtus: the HW MTU table
2340 * @mtu: the target MTU
2341 * @idx: index of selected entry in the MTU table
2342 *
2343 * Returns the index and the value in the HW MTU table that is closest to
2344 * but does not exceed @mtu, unless @mtu is smaller than any value in the
2345 * table, in which case that smallest available value is selected.
2346 */
2347unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2348 unsigned int *idx)
2349{
2350 unsigned int i = 0;
2351
2352 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2353 ++i;
2354 if (idx)
2355 *idx = i;
2356 return mtus[i];
2357}
2358EXPORT_SYMBOL(cxgb4_best_mtu);
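/*
 * Illustrative sketch (not built; userspace demo): cxgb4_best_mtu() run on a
 * sample MTU table. The table contents here are assumptions; the real table
 * is the one loaded into the HW by t4_load_mtus().
 */
#if 0
#include <stdio.h>

#define EX_NMTUS 6

static unsigned int ex_best_mtu(const unsigned short *mtus,
				unsigned short mtu, unsigned int *idx)
{
	unsigned int i = 0;

	while (i < EX_NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}

int main(void)
{
	static const unsigned short mtus[EX_NMTUS] = {
		88, 576, 1280, 1500, 4096, 9000
	};
	unsigned int idx;

	/* 1400 selects 1280 (idx 2): largest entry not exceeding the target */
	printf("best=%u idx=%u\n", ex_best_mtu(mtus, 1400, &idx), idx);
	/* 64 is below the smallest entry, so the smallest (88) is returned */
	printf("best=%u idx=%u\n", ex_best_mtu(mtus, 64, &idx), idx);
	return 0;
}
#endif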
2359
2360/**
2361 * cxgb4_port_chan - get the HW channel of a port
2362 * @dev: the net device for the port
2363 *
2364 * Return the HW Tx channel of the given port.
2365 */
2366unsigned int cxgb4_port_chan(const struct net_device *dev)
2367{
2368 return netdev2pinfo(dev)->tx_chan;
2369}
2370EXPORT_SYMBOL(cxgb4_port_chan);
2371
2372/**
2373 * cxgb4_port_viid - get the VI id of a port
2374 * @dev: the net device for the port
2375 *
2376 * Return the VI id of the given port.
2377 */
2378unsigned int cxgb4_port_viid(const struct net_device *dev)
2379{
2380 return netdev2pinfo(dev)->viid;
2381}
2382EXPORT_SYMBOL(cxgb4_port_viid);
2383
2384/**
2385 * cxgb4_port_idx - get the index of a port
2386 * @dev: the net device for the port
2387 *
2388 * Return the index of the given port.
2389 */
2390unsigned int cxgb4_port_idx(const struct net_device *dev)
2391{
2392 return netdev2pinfo(dev)->port_id;
2393}
2394EXPORT_SYMBOL(cxgb4_port_idx);
2395
2396void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2397 struct tp_tcp_stats *v6)
2398{
2399 struct adapter *adap = pci_get_drvdata(pdev);
2400
2401 spin_lock(&adap->stats_lock);
2402 t4_tp_get_tcp_stats(adap, v4, v6);
2403 spin_unlock(&adap->stats_lock);
2404}
2405EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2406
2407void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2408 const unsigned int *pgsz_order)
2409{
2410 struct adapter *adap = netdev2adap(dev);
2411
2412 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2413 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2414 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2415 HPZ3(pgsz_order[3]));
2416}
2417EXPORT_SYMBOL(cxgb4_iscsi_init);
2418
2419static struct pci_driver cxgb4_driver;
2420
2421static void check_neigh_update(struct neighbour *neigh)
2422{
2423 const struct device *parent;
2424 const struct net_device *netdev = neigh->dev;
2425
2426 if (netdev->priv_flags & IFF_802_1Q_VLAN)
2427 netdev = vlan_dev_real_dev(netdev);
2428 parent = netdev->dev.parent;
2429 if (parent && parent->driver == &cxgb4_driver.driver)
2430 t4_l2t_update(dev_get_drvdata(parent), neigh);
2431}
2432
2433static int netevent_cb(struct notifier_block *nb, unsigned long event,
2434 void *data)
2435{
2436 switch (event) {
2437 case NETEVENT_NEIGH_UPDATE:
2438 check_neigh_update(data);
2439 break;
2440	case NETEVENT_REDIRECT:
2441 default:
2442 break;
2443 }
2444 return 0;
2445}
2446
2447static bool netevent_registered;
2448static struct notifier_block cxgb4_netevent_nb = {
2449 .notifier_call = netevent_cb
2450};
2451
2452static void uld_attach(struct adapter *adap, unsigned int uld)
2453{
2454 void *handle;
2455 struct cxgb4_lld_info lli;
2456
2457 lli.pdev = adap->pdev;
2458 lli.l2t = adap->l2t;
2459 lli.tids = &adap->tids;
2460 lli.ports = adap->port;
2461 lli.vr = &adap->vres;
2462 lli.mtus = adap->params.mtus;
2463 if (uld == CXGB4_ULD_RDMA) {
2464 lli.rxq_ids = adap->sge.rdma_rxq;
2465 lli.nrxq = adap->sge.rdmaqs;
2466 } else if (uld == CXGB4_ULD_ISCSI) {
2467 lli.rxq_ids = adap->sge.ofld_rxq;
2468 lli.nrxq = adap->sge.ofldqsets;
2469 }
2470 lli.ntxq = adap->sge.ofldqsets;
2471 lli.nchan = adap->params.nports;
2472 lli.nports = adap->params.nports;
2473 lli.wr_cred = adap->params.ofldq_wr_cred;
2474 lli.adapter_type = adap->params.rev;
2475 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
2476 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
2477		t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
2478		(adap->fn * 4));
2479	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
2480		t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
2481		(adap->fn * 4));
2482	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2483 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2484 lli.fw_vers = adap->params.fw_vers;
2485
2486 handle = ulds[uld].add(&lli);
2487 if (IS_ERR(handle)) {
2488 dev_warn(adap->pdev_dev,
2489 "could not attach to the %s driver, error %ld\n",
2490 uld_str[uld], PTR_ERR(handle));
2491 return;
2492 }
2493
2494 adap->uld_handle[uld] = handle;
2495
2496 if (!netevent_registered) {
2497 register_netevent_notifier(&cxgb4_netevent_nb);
2498 netevent_registered = true;
2499 }
2500
2501	if (adap->flags & FULL_INIT_DONE)
2502		ulds[uld].state_change(handle, CXGB4_STATE_UP);
2503}
2504
2505static void attach_ulds(struct adapter *adap)
2506{
2507 unsigned int i;
2508
2509 mutex_lock(&uld_mutex);
2510 list_add_tail(&adap->list_node, &adapter_list);
2511 for (i = 0; i < CXGB4_ULD_MAX; i++)
2512 if (ulds[i].add)
2513 uld_attach(adap, i);
2514 mutex_unlock(&uld_mutex);
2515}
2516
2517static void detach_ulds(struct adapter *adap)
2518{
2519 unsigned int i;
2520
2521 mutex_lock(&uld_mutex);
2522 list_del(&adap->list_node);
2523 for (i = 0; i < CXGB4_ULD_MAX; i++)
2524 if (adap->uld_handle[i]) {
2525 ulds[i].state_change(adap->uld_handle[i],
2526 CXGB4_STATE_DETACH);
2527 adap->uld_handle[i] = NULL;
2528 }
2529 if (netevent_registered && list_empty(&adapter_list)) {
2530 unregister_netevent_notifier(&cxgb4_netevent_nb);
2531 netevent_registered = false;
2532 }
2533 mutex_unlock(&uld_mutex);
2534}
2535
2536static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2537{
2538 unsigned int i;
2539
2540 mutex_lock(&uld_mutex);
2541 for (i = 0; i < CXGB4_ULD_MAX; i++)
2542 if (adap->uld_handle[i])
2543 ulds[i].state_change(adap->uld_handle[i], new_state);
2544 mutex_unlock(&uld_mutex);
2545}
2546
2547/**
2548 * cxgb4_register_uld - register an upper-layer driver
2549 * @type: the ULD type
2550 * @p: the ULD methods
2551 *
2552 * Registers an upper-layer driver with this driver and notifies the ULD
2553 * about any presently available devices that support its type. Returns
2554 * %-EBUSY if a ULD of the same type is already registered.
2555 */
2556int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2557{
2558 int ret = 0;
2559 struct adapter *adap;
2560
2561 if (type >= CXGB4_ULD_MAX)
2562 return -EINVAL;
2563 mutex_lock(&uld_mutex);
2564 if (ulds[type].add) {
2565 ret = -EBUSY;
2566 goto out;
2567 }
2568 ulds[type] = *p;
2569 list_for_each_entry(adap, &adapter_list, list_node)
2570 uld_attach(adap, type);
2571out: mutex_unlock(&uld_mutex);
2572 return ret;
2573}
2574EXPORT_SYMBOL(cxgb4_register_uld);
2575
2576/**
2577 * cxgb4_unregister_uld - unregister an upper-layer driver
2578 * @type: the ULD type
2579 *
2580 * Unregisters an existing upper-layer driver.
2581 */
2582int cxgb4_unregister_uld(enum cxgb4_uld type)
2583{
2584 struct adapter *adap;
2585
2586 if (type >= CXGB4_ULD_MAX)
2587 return -EINVAL;
2588 mutex_lock(&uld_mutex);
2589 list_for_each_entry(adap, &adapter_list, list_node)
2590 adap->uld_handle[type] = NULL;
2591 ulds[type].add = NULL;
2592 mutex_unlock(&uld_mutex);
2593 return 0;
2594}
2595EXPORT_SYMBOL(cxgb4_unregister_uld);
2596
2597/**
2598 * cxgb_up - enable the adapter
2599 * @adap: adapter being enabled
2600 *
2601 * Called when the first port is enabled, this function performs the
2602 * actions necessary to make an adapter operational, such as completing
2603 * the initialization of HW modules, and enabling interrupts.
2604 *
2605 * Must be called with the rtnl lock held.
2606 */
2607static int cxgb_up(struct adapter *adap)
2608{
2609	int err;
2610
2611	err = setup_sge_queues(adap);
2612 if (err)
2613 goto out;
2614 err = setup_rss(adap);
2615 if (err)
2616 goto freeq;
2617
2618	if (adap->flags & USING_MSIX) {
2619		name_msix_vecs(adap);
2620		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2621 adap->msix_info[0].desc, adap);
2622 if (err)
2623 goto irq_err;
2624
2625 err = request_msix_queue_irqs(adap);
2626 if (err) {
2627 free_irq(adap->msix_info[0].vec, adap);
2628 goto irq_err;
2629 }
2630 } else {
2631 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2632 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2633				  adap->port[0]->name, adap);
2634		if (err)
2635 goto irq_err;
2636 }
2637 enable_rx(adap);
2638 t4_sge_start(adap);
2639 t4_intr_enable(adap);
2640	adap->flags |= FULL_INIT_DONE;
2641	notify_ulds(adap, CXGB4_STATE_UP);
2642 out:
2643 return err;
2644 irq_err:
2645 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2646 freeq:
2647	t4_free_sge_resources(adap);
2648	goto out;
2649}
2650
2651static void cxgb_down(struct adapter *adapter)
2652{
2653 t4_intr_disable(adapter);
2654 cancel_work_sync(&adapter->tid_release_task);
2655 adapter->tid_release_task_busy = false;
2656	adapter->tid_release_head = NULL;
2657
2658 if (adapter->flags & USING_MSIX) {
2659 free_msix_queue_irqs(adapter);
2660 free_irq(adapter->msix_info[0].vec, adapter);
2661 } else
2662 free_irq(adapter->pdev->irq, adapter);
2663 quiesce_rx(adapter);
2664	t4_sge_stop(adapter);
2665	t4_free_sge_resources(adapter);
2666	adapter->flags &= ~FULL_INIT_DONE;
2667}
2668
2669/*
2670 * net_device operations
2671 */
2672static int cxgb_open(struct net_device *dev)
2673{
2674 int err;
2675 struct port_info *pi = netdev_priv(dev);
2676 struct adapter *adapter = pi->adapter;
2677
2678	netif_carrier_off(dev);
2679
2680	if (!(adapter->flags & FULL_INIT_DONE)) {
2681 err = cxgb_up(adapter);
2682 if (err < 0)
2683 return err;
2684 }
2685
2686	err = link_start(dev);
2687 if (!err)
2688 netif_tx_start_all_queues(dev);
2689 return err;
2690}
2691
2692static int cxgb_close(struct net_device *dev)
2693{
2694	struct port_info *pi = netdev_priv(dev);
2695 struct adapter *adapter = pi->adapter;
2696
2697 netif_tx_stop_all_queues(dev);
2698 netif_carrier_off(dev);
2699	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
2700}
2701
2702static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2703						struct rtnl_link_stats64 *ns)
2704{
2705 struct port_stats stats;
2706 struct port_info *p = netdev_priv(dev);
2707 struct adapter *adapter = p->adapter;
2708
2709 spin_lock(&adapter->stats_lock);
2710 t4_get_port_stats(adapter, p->tx_chan, &stats);
2711 spin_unlock(&adapter->stats_lock);
2712
2713 ns->tx_bytes = stats.tx_octets;
2714 ns->tx_packets = stats.tx_frames;
2715 ns->rx_bytes = stats.rx_octets;
2716 ns->rx_packets = stats.rx_frames;
2717 ns->multicast = stats.rx_mcast_frames;
2718
2719 /* detailed rx_errors */
2720 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2721 stats.rx_runt;
2722 ns->rx_over_errors = 0;
2723 ns->rx_crc_errors = stats.rx_fcs_err;
2724 ns->rx_frame_errors = stats.rx_symbol_err;
2725 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
2726 stats.rx_ovflow2 + stats.rx_ovflow3 +
2727 stats.rx_trunc0 + stats.rx_trunc1 +
2728 stats.rx_trunc2 + stats.rx_trunc3;
2729 ns->rx_missed_errors = 0;
2730
2731 /* detailed tx_errors */
2732 ns->tx_aborted_errors = 0;
2733 ns->tx_carrier_errors = 0;
2734 ns->tx_fifo_errors = 0;
2735 ns->tx_heartbeat_errors = 0;
2736 ns->tx_window_errors = 0;
2737
2738 ns->tx_errors = stats.tx_error_frames;
2739 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2740 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2741 return ns;
2742}
2743
2744static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2745{
2746	unsigned int mbox;
2747	int ret = 0, prtad, devad;
2748 struct port_info *pi = netdev_priv(dev);
2749 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2750
2751 switch (cmd) {
2752 case SIOCGMIIPHY:
2753 if (pi->mdio_addr < 0)
2754 return -EOPNOTSUPP;
2755 data->phy_id = pi->mdio_addr;
2756 break;
2757 case SIOCGMIIREG:
2758 case SIOCSMIIREG:
2759 if (mdio_phy_id_is_c45(data->phy_id)) {
2760 prtad = mdio_phy_id_prtad(data->phy_id);
2761 devad = mdio_phy_id_devad(data->phy_id);
2762 } else if (data->phy_id < 32) {
2763 prtad = data->phy_id;
2764 devad = 0;
2765 data->reg_num &= 0x1f;
2766 } else
2767 return -EINVAL;
2768
2769		mbox = pi->adapter->fn;
2770		if (cmd == SIOCGMIIREG)
2771			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
2772					 data->reg_num, &data->val_out);
2773		else
2774			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
2775					 data->reg_num, data->val_in);
2776 break;
2777 default:
2778 return -EOPNOTSUPP;
2779 }
2780 return ret;
2781}
2782
2783static void cxgb_set_rxmode(struct net_device *dev)
2784{
2785 /* unfortunately we can't return errors to the stack */
2786 set_rxmode(dev, -1, false);
2787}
2788
2789static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2790{
2791 int ret;
2792 struct port_info *pi = netdev_priv(dev);
2793
2794 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
2795 return -EINVAL;
2796	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
2797			    -1, -1, -1, true);
2798	if (!ret)
2799 dev->mtu = new_mtu;
2800 return ret;
2801}
2802
2803static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2804{
2805 int ret;
2806 struct sockaddr *addr = p;
2807 struct port_info *pi = netdev_priv(dev);
2808
2809 if (!is_valid_ether_addr(addr->sa_data))
2810 return -EINVAL;
2811
2812	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
2813			    pi->xact_addr_filt, addr->sa_data, true, true);
2814	if (ret < 0)
2815 return ret;
2816
2817 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2818 pi->xact_addr_filt = ret;
2819 return 0;
2820}
2821
2822#ifdef CONFIG_NET_POLL_CONTROLLER
2823static void cxgb_netpoll(struct net_device *dev)
2824{
2825 struct port_info *pi = netdev_priv(dev);
2826 struct adapter *adap = pi->adapter;
2827
2828 if (adap->flags & USING_MSIX) {
2829 int i;
2830 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2831
2832 for (i = pi->nqsets; i; i--, rx++)
2833 t4_sge_intr_msix(0, &rx->rspq);
2834 } else
2835 t4_intr_handler(adap)(0, adap);
2836}
2837#endif
2838
2839static const struct net_device_ops cxgb4_netdev_ops = {
2840 .ndo_open = cxgb_open,
2841 .ndo_stop = cxgb_close,
2842 .ndo_start_xmit = t4_eth_xmit,
2843	.ndo_get_stats64 = cxgb_get_stats,
2844	.ndo_set_rx_mode = cxgb_set_rxmode,
2845	.ndo_set_mac_address = cxgb_set_mac_addr,
2846	.ndo_set_features = cxgb_set_features,
2847	.ndo_validate_addr = eth_validate_addr,
2848 .ndo_do_ioctl = cxgb_ioctl,
2849 .ndo_change_mtu = cxgb_change_mtu,
2850#ifdef CONFIG_NET_POLL_CONTROLLER
2851 .ndo_poll_controller = cxgb_netpoll,
2852#endif
2853};
2854
2855void t4_fatal_err(struct adapter *adap)
2856{
2857 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
2858 t4_intr_disable(adap);
2859 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
2860}
2861
2862static void setup_memwin(struct adapter *adap)
2863{
2864 u32 bar0;
2865
2866 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
2867 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
2868 (bar0 + MEMWIN0_BASE) | BIR(0) |
2869 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
2870 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
2871 (bar0 + MEMWIN1_BASE) | BIR(0) |
2872 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
2873 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
2874 (bar0 + MEMWIN2_BASE) | BIR(0) |
2875 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
2876	if (adap->vres.ocq.size) {
2877 unsigned int start, sz_kb;
2878
2879 start = pci_resource_start(adap->pdev, 2) +
2880 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
2881 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
2882 t4_write_reg(adap,
2883 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
2884 start | BIR(1) | WINDOW(ilog2(sz_kb)));
2885 t4_write_reg(adap,
2886 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
2887 adap->vres.ocq.start);
2888 t4_read_reg(adap,
2889 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
2890 }
2891}
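/*
 * Illustrative sketch (not built; userspace demo): the WINDOW() encoding
 * used by setup_memwin() above. The register field stores
 * ilog2(aperture) - 10, i.e. the log2 of the aperture expressed in 1KB
 * units; the aperture sizes below are assumptions for illustration.
 */
#if 0
#include <stdio.h>

static unsigned int ex_ilog2(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int apertures[] = { 1024, 65536 };	/* 1KB and 64KB */
	unsigned int i;

	for (i = 0; i < 2; i++)
		/* prints 0 for the 1KB aperture and 6 for the 64KB one */
		printf("aperture %u -> WINDOW field %u\n",
		       apertures[i], ex_ilog2(apertures[i]) - 10);
	return 0;
}
#endif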
2892
2893static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
2894{
2895 u32 v;
2896 int ret;
2897
2898 /* get device capabilities */
2899 memset(c, 0, sizeof(*c));
2900 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2901 FW_CMD_REQUEST | FW_CMD_READ);
2902 c->retval_len16 = htonl(FW_LEN16(*c));
2903	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
2904	if (ret < 0)
2905 return ret;
2906
2907 /* select capabilities we'll be using */
2908 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
2909 if (!vf_acls)
2910 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
2911 else
2912 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
2913 } else if (vf_acls) {
2914 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
2915 return ret;
2916 }
2917 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2918 FW_CMD_REQUEST | FW_CMD_WRITE);
2919	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
2920	if (ret < 0)
2921 return ret;
2922
2923	ret = t4_config_glbl_rss(adap, adap->fn,
2924				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
2925 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
2926 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
2927 if (ret < 0)
2928 return ret;
2929
2930	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
2931			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00002932 if (ret < 0)
2933 return ret;
2934
2935 t4_sge_init(adap);
2936
2937	/* tweak some settings */
2938 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
2939 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
2940 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
2941 v = t4_read_reg(adap, TP_PIO_DATA);
2942 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
2943
2944 /* get basic stuff going */
2945 return t4_early_init(adap, adap->fn);
2946}
2947
2948/*
2949 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
2950 */
2951#define MAX_ATIDS 8192U
2952
2953/*
2954 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
2955 */
2956static int adap_init0(struct adapter *adap)
2957{
2958 int ret;
2959 u32 v, port_vec;
2960 enum dev_state state;
2961 u32 params[7], val[7];
2962 struct fw_caps_config_cmd c;
2963
2964 ret = t4_check_fw_version(adap);
2965 if (ret == -EINVAL || ret > 0) {
2966 if (upgrade_fw(adap) >= 0) /* recache FW version */
2967 ret = t4_check_fw_version(adap);
2968 }
2969 if (ret < 0)
2970 return ret;
2971
2972 /* contact FW, request master */
2973	ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
2974	if (ret < 0) {
2975 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
2976 ret);
2977 return ret;
2978 }
2979
2980 /* reset device */
2981	ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
2982	if (ret < 0)
2983 goto bye;
2984
2985	for (v = 0; v < SGE_NTIMERS - 1; v++)
2986 adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
2987 adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
2988 adap->sge.counter_val[0] = 1;
2989 for (v = 1; v < SGE_NCOUNTERS; v++)
2990 adap->sge.counter_val[v] = min(intr_cnt[v - 1],
2991 THRESHOLD_3_MASK);
2992#define FW_PARAM_DEV(param) \
2993 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2994 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2995
2996	params[0] = FW_PARAM_DEV(CCLK);
2997	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
2998	if (ret < 0)
2999 goto bye;
3000 adap->params.vpd.cclk = val[0];
3001
3002 ret = adap_init1(adap, &c);
3003 if (ret < 0)
3004 goto bye;
3005
3006#define FW_PARAM_PFVF(param) \
3007	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3008	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
3009	FW_PARAMS_PARAM_Y(adap->fn))
3010
3011 params[0] = FW_PARAM_DEV(PORTVEC);
3012 params[1] = FW_PARAM_PFVF(L2T_START);
3013 params[2] = FW_PARAM_PFVF(L2T_END);
3014 params[3] = FW_PARAM_PFVF(FILTER_START);
3015 params[4] = FW_PARAM_PFVF(FILTER_END);
3016	params[5] = FW_PARAM_PFVF(IQFLINT_START);
3017	params[6] = FW_PARAM_PFVF(EQ_START);
3018	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
3019	if (ret < 0)
3020 goto bye;
3021 port_vec = val[0];
3022 adap->tids.ftid_base = val[3];
3023 adap->tids.nftids = val[4] - val[3] + 1;
3024	adap->sge.ingr_start = val[5];
3025	adap->sge.egr_start = val[6];
3026
3027 if (c.ofldcaps) {
3028 /* query offload-related parameters */
3029 params[0] = FW_PARAM_DEV(NTID);
3030 params[1] = FW_PARAM_PFVF(SERVER_START);
3031 params[2] = FW_PARAM_PFVF(SERVER_END);
3032 params[3] = FW_PARAM_PFVF(TDDP_START);
3033 params[4] = FW_PARAM_PFVF(TDDP_END);
3034 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3035		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3036				      val);
3037		if (ret < 0)
3038 goto bye;
3039 adap->tids.ntids = val[0];
3040 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3041 adap->tids.stid_base = val[1];
3042 adap->tids.nstids = val[2] - val[1] + 1;
3043 adap->vres.ddp.start = val[3];
3044 adap->vres.ddp.size = val[4] - val[3] + 1;
3045 adap->params.ofldq_wr_cred = val[5];
3046 adap->params.offload = 1;
3047 }
3048 if (c.rdmacaps) {
3049 params[0] = FW_PARAM_PFVF(STAG_START);
3050 params[1] = FW_PARAM_PFVF(STAG_END);
3051 params[2] = FW_PARAM_PFVF(RQ_START);
3052 params[3] = FW_PARAM_PFVF(RQ_END);
3053 params[4] = FW_PARAM_PFVF(PBL_START);
3054 params[5] = FW_PARAM_PFVF(PBL_END);
3055		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3056				      val);
3057		if (ret < 0)
3058 goto bye;
3059 adap->vres.stag.start = val[0];
3060 adap->vres.stag.size = val[1] - val[0] + 1;
3061 adap->vres.rq.start = val[2];
3062 adap->vres.rq.size = val[3] - val[2] + 1;
3063 adap->vres.pbl.start = val[4];
3064 adap->vres.pbl.size = val[5] - val[4] + 1;
3065
3066 params[0] = FW_PARAM_PFVF(SQRQ_START);
3067 params[1] = FW_PARAM_PFVF(SQRQ_END);
3068 params[2] = FW_PARAM_PFVF(CQ_START);
3069 params[3] = FW_PARAM_PFVF(CQ_END);
3070		params[4] = FW_PARAM_PFVF(OCQ_START);
3071 params[5] = FW_PARAM_PFVF(OCQ_END);
3072		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3073				      val);
3074		if (ret < 0)
3075 goto bye;
3076 adap->vres.qp.start = val[0];
3077 adap->vres.qp.size = val[1] - val[0] + 1;
3078 adap->vres.cq.start = val[2];
3079 adap->vres.cq.size = val[3] - val[2] + 1;
3080		adap->vres.ocq.start = val[4];
3081 adap->vres.ocq.size = val[5] - val[4] + 1;
3082	}
3083 if (c.iscsicaps) {
3084 params[0] = FW_PARAM_PFVF(ISCSI_START);
3085 params[1] = FW_PARAM_PFVF(ISCSI_END);
3086		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
3087				      val);
3088		if (ret < 0)
3089 goto bye;
3090 adap->vres.iscsi.start = val[0];
3091 adap->vres.iscsi.size = val[1] - val[0] + 1;
3092 }
3093#undef FW_PARAM_PFVF
3094#undef FW_PARAM_DEV
3095
3096 adap->params.nports = hweight32(port_vec);
3097 adap->params.portvec = port_vec;
3098 adap->flags |= FW_OK;
3099
3100 /* These are finalized by FW initialization, load their values now */
3101 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3102 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3103 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
3104 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3105 adap->params.b_wnd);
3106
3107#ifdef CONFIG_PCI_IOV
3108 /*
3109 * Provision resource limits for Virtual Functions. We currently
3110 * grant them all the same static resource limits except for the Port
3111	 * Access Rights Mask, which we assign based on the PF. All of the
3112	 * static provisioning for both the PF and its VFs really needs to
3113	 * be managed in a persistent manner, for each device, by the
3114	 * firmware.
3115 */
3116 {
3117 int pf, vf;
3118
3119 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3120 if (num_vf[pf] <= 0)
3121 continue;
3122
3123 /* VF numbering starts at 1! */
3124 for (vf = 1; vf <= num_vf[pf]; vf++) {
3125				ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
3126						  VFRES_NEQ, VFRES_NETHCTRL,
3127 VFRES_NIQFLINT, VFRES_NIQ,
3128 VFRES_TC, VFRES_NVI,
3129 FW_PFVF_CMD_CMASK_MASK,
3130 pfvfres_pmask(adap, pf, vf),
3131 VFRES_NEXACTF,
3132 VFRES_R_CAPS, VFRES_WX_CAPS);
3133 if (ret < 0)
3134 dev_warn(adap->pdev_dev, "failed to "
3135 "provision pf/vf=%d/%d; "
3136 "err=%d\n", pf, vf, ret);
3137 }
3138 }
3139 }
3140#endif
3141
3142	setup_memwin(adap);
3143	return 0;
3144
3145 /*
3146 * If a command timed out or failed with EIO, the FW is not operating
3147 * within its spec or something catastrophic happened to the HW/FW;
3148 * stop issuing commands.
3149 */
3150bye: if (ret != -ETIMEDOUT && ret != -EIO)
3151		t4_fw_bye(adap, adap->fn);
3152	return ret;
3153}
3154
3155/* EEH callbacks */
3156
3157static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
3158 pci_channel_state_t state)
3159{
3160 int i;
3161 struct adapter *adap = pci_get_drvdata(pdev);
3162
3163 if (!adap)
3164 goto out;
3165
3166 rtnl_lock();
3167 adap->flags &= ~FW_OK;
3168 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
3169 for_each_port(adap, i) {
3170 struct net_device *dev = adap->port[i];
3171
3172 netif_device_detach(dev);
3173 netif_carrier_off(dev);
3174 }
3175 if (adap->flags & FULL_INIT_DONE)
3176 cxgb_down(adap);
3177 rtnl_unlock();
3178 pci_disable_device(pdev);
3179out: return state == pci_channel_io_perm_failure ?
3180 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
3181}
3182
3183static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
3184{
3185 int i, ret;
3186 struct fw_caps_config_cmd c;
3187 struct adapter *adap = pci_get_drvdata(pdev);
3188
3189 if (!adap) {
3190 pci_restore_state(pdev);
3191 pci_save_state(pdev);
3192 return PCI_ERS_RESULT_RECOVERED;
3193 }
3194
3195 if (pci_enable_device(pdev)) {
3196 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
3197 return PCI_ERS_RESULT_DISCONNECT;
3198 }
3199
3200 pci_set_master(pdev);
3201 pci_restore_state(pdev);
3202 pci_save_state(pdev);
3203 pci_cleanup_aer_uncorrect_error_status(pdev);
3204
3205 if (t4_wait_dev_ready(adap) < 0)
3206 return PCI_ERS_RESULT_DISCONNECT;
3207	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
3208		return PCI_ERS_RESULT_DISCONNECT;
3209 adap->flags |= FW_OK;
3210 if (adap_init1(adap, &c))
3211 return PCI_ERS_RESULT_DISCONNECT;
3212
3213 for_each_port(adap, i) {
3214 struct port_info *p = adap2pinfo(adap, i);
3215
3216		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
3217				  NULL, NULL);
3218		if (ret < 0)
3219 return PCI_ERS_RESULT_DISCONNECT;
3220 p->viid = ret;
3221 p->xact_addr_filt = -1;
3222 }
3223
3224 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3225 adap->params.b_wnd);
3226	setup_memwin(adap);
3227	if (cxgb_up(adap))
3228 return PCI_ERS_RESULT_DISCONNECT;
3229 return PCI_ERS_RESULT_RECOVERED;
3230}
3231
3232static void eeh_resume(struct pci_dev *pdev)
3233{
3234 int i;
3235 struct adapter *adap = pci_get_drvdata(pdev);
3236
3237 if (!adap)
3238 return;
3239
3240 rtnl_lock();
3241 for_each_port(adap, i) {
3242 struct net_device *dev = adap->port[i];
3243
3244 if (netif_running(dev)) {
3245 link_start(dev);
3246 cxgb_set_rxmode(dev);
3247 }
3248 netif_device_attach(dev);
3249 }
3250 rtnl_unlock();
3251}
3252
3253static struct pci_error_handlers cxgb4_eeh = {
3254 .error_detected = eeh_err_detected,
3255 .slot_reset = eeh_slot_reset,
3256 .resume = eeh_resume,
3257};
3258
3259static inline bool is_10g_port(const struct link_config *lc)
3260{
3261 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
3262}
3263
3264static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
3265 unsigned int size, unsigned int iqe_size)
3266{
3267 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
3268 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
3269 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
3270 q->iqe_len = iqe_size;
3271 q->size = size;
3272}
3273
3274/*
3275 * Perform default configuration of DMA queues depending on the number and type
3276 * of ports we found and the number of available CPUs. Most settings can be
3277 * modified by the admin prior to actual use.
3278 */
3279static void __devinit cfg_queues(struct adapter *adap)
3280{
3281 struct sge *s = &adap->sge;
3282 int i, q10g = 0, n10g = 0, qidx = 0;
3283
3284 for_each_port(adap, i)
3285 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
3286
3287 /*
3288	 * We default to one queue set per non-10G port and up to as many
3289	 * queue sets as there are CPU cores per 10G port.
3290 */
3291 if (n10g)
3292 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
3293 if (q10g > num_online_cpus())
3294 q10g = num_online_cpus();
3295
3296 for_each_port(adap, i) {
3297 struct port_info *pi = adap2pinfo(adap, i);
3298
3299 pi->first_qset = qidx;
3300 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
3301 qidx += pi->nqsets;
3302 }
3303
3304 s->ethqsets = qidx;
3305 s->max_ethqsets = qidx; /* MSI-X may lower it later */
3306
3307 if (is_offload(adap)) {
3308 /*
3309		 * For offload we use one queue per channel if all ports are up
3310		 * to 1G; otherwise we divide all available queues amongst the
3311		 * channels, capped by the number of available cores.
3312 */
3313 if (n10g) {
3314 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
3315 num_online_cpus());
3316 s->ofldqsets = roundup(i, adap->params.nports);
3317 } else
3318 s->ofldqsets = adap->params.nports;
3319 /* For RDMA one Rx queue per channel suffices */
3320 s->rdmaqs = adap->params.nports;
3321 }
3322
3323 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
3324 struct sge_eth_rxq *r = &s->ethrxq[i];
3325
3326 init_rspq(&r->rspq, 0, 0, 1024, 64);
3327 r->fl.size = 72;
3328 }
3329
3330 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
3331 s->ethtxq[i].q.size = 1024;
3332
3333 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
3334 s->ctrlq[i].q.size = 512;
3335
3336 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
3337 s->ofldtxq[i].q.size = 1024;
3338
3339 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
3340 struct sge_ofld_rxq *r = &s->ofldrxq[i];
3341
3342 init_rspq(&r->rspq, 0, 0, 1024, 64);
3343 r->rspq.uld = CXGB4_ULD_ISCSI;
3344 r->fl.size = 72;
3345 }
3346
3347 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
3348 struct sge_ofld_rxq *r = &s->rdmarxq[i];
3349
3350 init_rspq(&r->rspq, 0, 0, 511, 64);
3351 r->rspq.uld = CXGB4_ULD_RDMA;
3352 r->fl.size = 72;
3353 }
3354
3355 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
3356 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
3357}
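/*
 * Illustrative sketch (not built; userspace demo): the Ethernet queue-set
 * split computed by cfg_queues() above, for an assumed adapter with two 10G
 * and two 1G ports, MAX_ETH_QSETS = 32 and 8 online CPUs.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int max_eth_qsets = 32, nports = 4, n10g = 2, ncpus = 8;
	int q10g = 0;

	if (n10g)
		q10g = (max_eth_qsets - (nports - n10g)) / n10g;
	if (q10g > ncpus)
		q10g = ncpus;

	/* prints "q10g=8 total=18": 8 queue sets per 10G port, 1 per 1G port */
	printf("q10g=%d total=%d\n", q10g, n10g * q10g + (nports - n10g));
	return 0;
}
#endif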
3358
3359/*
3360 * Reduce the number of Ethernet queues across all ports to at most n.
3361 * n provides at least one queue per port.
3362 */
3363static void __devinit reduce_ethqs(struct adapter *adap, int n)
3364{
3365 int i;
3366 struct port_info *pi;
3367
3368 while (n < adap->sge.ethqsets)
3369 for_each_port(adap, i) {
3370 pi = adap2pinfo(adap, i);
3371 if (pi->nqsets > 1) {
3372 pi->nqsets--;
3373 adap->sge.ethqsets--;
3374 if (adap->sge.ethqsets <= n)
3375 break;
3376 }
3377 }
3378
3379 n = 0;
3380 for_each_port(adap, i) {
3381 pi = adap2pinfo(adap, i);
3382 pi->first_qset = n;
3383 n += pi->nqsets;
3384 }
3385}

/* 2 MSI-X vectors needed for the firmware event queue and non-data interrupts */
#define EXTRA_VECS 2

static int __devinit enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, err, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan vectors for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

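	/*
	 * In this era pci_enable_msix() returns 0 on success, a negative
	 * errno on failure, and a positive count of the vectors actually
	 * available when the request cannot be met, so we retry with that
	 * count until we either succeed or fall below our minimum.
	 * Hypothetical sizing: a 2-port offload adapter with
	 * max_ethqsets = 16, rdmaqs = 2 and ofldqsets = 8 wants
	 * 16 + 2 + 2 + 8 = 28 vectors but can make do with
	 * need = 2 + 2 + 4 = 8.
	 */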
	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
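		/*
		 * Give the ULDs whatever remains after the NIC takes its
		 * share: reserve nchan of the ofld_need vectors for RDMA and
		 * round the offload share down to a multiple of nchan so
		 * every channel ends up with the same number of queues.
		 */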
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;  /* round down */
		}
		for (i = 0; i < want; ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(adap->pdev_dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}

#undef EXTRA_VECS

static int __devinit init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
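		/*
		 * Spread the RSS buckets evenly over the port's queue sets;
		 * e.g. rss_size = 128 with nqsets = 8 yields the repeating
		 * pattern 0, 1, ..., 7, 0, 1, ...
		 */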
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;
	}
	return 0;
}

static void __devinit print_port_info(const struct net_device *dev)
{
	static const char *base[] = {
		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
	};

	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", base[pi->port_type]);
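	/*
	 * The trailing '/' of the last speed is overwritten by "BASE-...",
	 * so a port supporting all three speeds with, say, type "T XFI"
	 * prints as "100/1000/10GBASE-T XFI".
	 */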

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id, adap->params.rev, buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, E/C: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.ec);
}

static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	u16 v;
	int pos;

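	/*
	 * Set the Enable Relaxed Ordering bit in the PCIe Device Control
	 * register, allowing the device to use relaxed-ordering semantics
	 * on its upstream writes.
	 */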
	pos = pci_pcie_cap(dev);
	if (pos > 0) {
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
		v |= PCI_EXP_DEVCTL_RELAX_EN;
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
	}
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)

static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int func, i, err;
	struct port_info *pi;
	unsigned int highdma = 0;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
	if (func != ent->driver_data) {
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = NETIF_F_HIGHDMA;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
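	/*
	 * 0xff presumably marks a Tx channel with no registered port; the
	 * real mappings are installed as each net device registers below.
	 */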

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar;
	err = adap_init0(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->features |= netdev->hw_features | highdma;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warning("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);