/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#define DRV_VERSION "1.3.0-ko"
#define DRV_DESC "Chelsio T4 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

#ifdef CONFIG_PCI_IOV
/*
 * Virtual Function provisioning constants.  We need two extra Ingress Queues
 * with Interrupt capability to serve as the VF's Firmware Event Queue and
 * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
 * Lists associated with them.  For each Ethernet/Control Egress Queue and
 * for each Free List, we need an Egress Context.
 */
enum {
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */

        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
        VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */

        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
};
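
/*
 * How these numbers combine (a reading of the constants above, not a
 * statement of firmware requirements): each VF gets VFRES_NQSETS Queue Sets,
 * VFRES_NIQFLINT is NQSETS + 2 to cover the two extra interrupt-capable
 * Ingress Queues (Firmware Event Queue and Forwarded Interrupt Queue), and
 * VFRES_NEQ is NQSETS * 2 because each Queue Set needs one Egress Context
 * for its Ethernet Tx queue and one for its Free List.
 */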

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
                                  unsigned int pf, unsigned int vf)
{
        unsigned int portn, portvec;

        /*
         * Give PFs access to all of the ports.
         */
        if (vf == 0)
                return FW_PFVF_CMD_PMASK_MASK;

        /*
         * For VFs, we'll assign them access to the ports based purely on the
         * PF.  We assign active ports in order, wrapping around if there are
         * fewer active ports than PFs: e.g. active port[pf % nports].
         * Unfortunately the adapter's port_info structs haven't been
         * initialized yet so we have to compute this.
         */
        if (adapter->params.nports == 0)
                return 0;

        portn = pf % adapter->params.nports;
        portvec = adapter->params.portvec;
        for (;;) {
                /*
                 * Isolate the lowest set bit in the port vector.  If we're at
                 * the port number that we want, return that as the pmask.
                 * Otherwise mask that bit out of the port vector and
                 * decrement our port number ...
                 */
                unsigned int pmask = portvec ^ (portvec & (portvec - 1));

                if (portn == 0)
                        return pmask;
                portn--;
                portvec &= ~pmask;
        }
        /*NOTREACHED*/
}
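
/*
 * Worked example of the lowest-set-bit isolation used above: with
 * portvec = 0xa (ports 1 and 3 active), portvec & (portvec - 1) = 0x8,
 * so portvec ^ 0x8 = 0x2, i.e. the mask of the lowest-numbered active port.
 */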
#endif

enum {
        MEMWIN0_APERTURE = 65536,
        MEMWIN0_BASE = 0x30000,
        MEMWIN1_APERTURE = 32768,
        MEMWIN1_BASE = 0x28000,
        MEMWIN2_APERTURE = 2048,
        MEMWIN2_BASE = 0x1b800,
};
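
/*
 * These constants appear to describe three PCIe memory windows (BAR0 offset
 * and aperture size in bytes) through which adapter memory can be accessed;
 * how each window is used is not spelled out here.
 */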

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES = 128,
        MIN_FL_ENTRIES = 16
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0xa000, 0),  /* PE10K */
        CH_DEVICE(0x4001, -1),
        CH_DEVICE(0x4002, -1),
        CH_DEVICE(0x4003, -1),
        CH_DEVICE(0x4004, -1),
        CH_DEVICE(0x4005, -1),
        CH_DEVICE(0x4006, -1),
        CH_DEVICE(0x4007, -1),
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
        CH_DEVICE(0x4404, 4),
        CH_DEVICE(0x4405, 4),
        CH_DEVICE(0x4406, 4),
        CH_DEVICE(0x4407, 4),
        CH_DEVICE(0x4408, 4),
        CH_DEVICE(0x4409, 4),
        CH_DEVICE(0x440a, 4),
        { 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
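
/*
 * Hypothetical usage example: "modprobe cxgb4 msi=1" restricts the driver
 * to MSI or INTx even on platforms where MSI-X is available.
 */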

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");

static int vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

static unsigned int num_vf[4];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}
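
/*
 * Note on the batching above: addresses are accumulated in groups of up to
 * ARRAY_SIZE(addr) (7) per t4_alloc_mac_filt() call, and "free" is true only
 * for the first call so existing filters are cleared exactly once; addresses
 * that don't fit in exact-match filters end up in the uhash/mhash vectors
 * programmed by t4_set_addr_hash().
 */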

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}

/**
 * link_start - enable a port
 * @dev: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0)
                ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
        return ret;
}

/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;  /* skip RSS header */
        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

                if (p->type == 0)
                        t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
        return 0;
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD.  All processing is done by
 * the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
        if (v & PFSW) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);
}
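
/*
 * Vector layout implied by the naming above: vector 0 is the non-data
 * (slow path) interrupt, vector 1 the firmware event queue, then one vector
 * per Ethernet Rx queue, followed by the offload and RDMA Rx queues.
 * request_msix_queue_irqs()/free_msix_queue_irqs() below walk the vectors
 * in this same order.
 */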

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        return 0;

unwind:
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
}

/**
 * write_rss - write the RSS table for a given port
 * @pi: the port
 * @queues: array of queue indices for RSS
 *
 * Sets up the portion of the HW RSS table for the port's VI to distribute
 * packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;

        err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
}
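
/*
 * Each entry of the @queues argument to write_rss() above is an index into
 * the port's nqsets Rx queues; e.g. an identity pattern {0, 1, ...,
 * nqsets-1} repeated to fill all rss_size slots would spread flows evenly
 * across the port's queue sets.
 */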

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                err = write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}

/**
 * setup_sge_queues - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;  /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}
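
/*
 * Convention for msi_idx above: a positive value is the next MSI-X vector
 * to hand to a new queue, while the negative value -(intrq.abs_id + 1) used
 * in MSI/INTx mode is presumably how t4_sge_alloc_rxq() is told to forward
 * the queue's interrupts to the common interrupt queue instead.
 */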

/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
        int ret;
        u32 vers;
        const struct fw_hdr *hdr;
        const struct firmware *fw;
        struct device *dev = adap->pdev_dev;

        ret = request_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "unable to load firmware image " FW_FNAME
                        ", error %d\n", ret);
                return ret;
        }

        hdr = (const struct fw_hdr *)fw->data;
        vers = ntohl(hdr->fw_ver);
        if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
                ret = -EINVAL;  /* wrong major version, won't do */
                goto out;
        }

        /*
         * If the flash FW is unusable or we found something newer, load it.
         */
        if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
            vers > adap->params.fw_vers) {
                ret = -t4_load_fw(adap, fw->data, fw->size);
                if (!ret)
                        dev_info(dev, "firmware upgraded to version %pI4 from "
                                 FW_FNAME "\n", &hdr->fw_ver);
        }
out:    release_firmware(fw);
        return ret;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL);

        if (!p)
                p = vzalloc(size);
        return p;
}
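
/*
 * Memory from t4_alloc_mem() may come from vmalloc, so it must be released
 * with t4_free_mem() below rather than kfree().
 */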

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}

static inline int is_offload(const struct adapter *adap)
{
        return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
        return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxBroadcastFrames  ",
        "TxMulticastFrames  ",
        "TxUnicastFrames    ",
        "TxErrorFrames      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "TxFramesDropped    ",
        "TxPauseFrames      ",
        "TxPPP0Frames       ",
        "TxPPP1Frames       ",
        "TxPPP2Frames       ",
        "TxPPP3Frames       ",
        "TxPPP4Frames       ",
        "TxPPP5Frames       ",
        "TxPPP6Frames       ",
        "TxPPP7Frames       ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxBroadcastFrames  ",
        "RxMulticastFrames  ",
        "RxUnicastFrames    ",

        "RxFramesTooLong    ",
        "RxJabberErrors     ",
        "RxFCSErrors        ",
        "RxLengthErrors     ",
        "RxSymbolErrors     ",
        "RxRuntFrames       ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "RxPauseFrames      ",
        "RxPPP0Frames       ",
        "RxPPP1Frames       ",
        "RxPPP2Frames       ",
        "RxPPP3Frames       ",
        "RxPPP4Frames       ",
        "RxPPP5Frames       ",
        "RxPPP6Frames       ",
        "RxPPP7Frames       ",

        "RxBG0FramesDropped ",
        "RxBG1FramesDropped ",
        "RxBG2FramesDropped ",
        "RxBG3FramesDropped ",
        "RxBG0FramesTrunc   ",
        "RxBG1FramesTrunc   ",
        "RxBG2FramesTrunc   ",
        "RxBG3FramesTrunc   ",

        "TSO                ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "GROpackets         ",
        "GROmerged          ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T4_REGMAP_SIZE (160 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T4_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = netdev2adap(dev);

        strcpy(info->driver, KBUILD_MODNAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));

        if (!adapter->params.fw_vers)
                strcpy(info->fw_version, "N/A");
        else
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%u.%u.%u.%u, TP %u.%u.%u.%u",
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
        u64 tso;
        u64 tx_csum;
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
        u64 gro_pkts;
        u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
                const struct port_info *p, struct queue_port_stats *s)
{
        int i;
        const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
        const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

        memset(s, 0, sizeof(*s));
        for (i = 0; i < p->nqsets; i++, rx++, tx++) {
                s->tso += tx->tso;
                s->tx_csum += tx->tx_cso;
                s->rx_csum += rx->stats.rx_cso;
                s->vlan_ex += rx->stats.vlan_ex;
                s->vlan_ins += tx->vlan_ins;
                s->gro_pkts += rx->stats.lro_pkts;
                s->gro_merged += rx->stats.lro_merged;
        }
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

        data += sizeof(struct port_stats) / sizeof(u64);
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
        return 4 | (ap->params.rev << 10) | (1 << 16);
}

static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
                           unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        static const unsigned int reg_ranges[] = {
                0x1008, 0x1108,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1300, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x30d8,
                0x30e0, 0x5924,
                0x5960, 0x59d4,
                0x5a00, 0x5af8,
                0x6000, 0x6098,
                0x6100, 0x6150,
                0x6200, 0x6208,
                0x6240, 0x6248,
                0x6280, 0x6338,
                0x6370, 0x638c,
                0x6400, 0x643c,
                0x6500, 0x6524,
                0x6a00, 0x6a38,
                0x6a60, 0x6a78,
                0x6b00, 0x6b84,
                0x6bf0, 0x6c84,
                0x6cf0, 0x6d84,
                0x6df0, 0x6e84,
                0x6ef0, 0x6f84,
                0x6ff0, 0x7084,
                0x70f0, 0x7184,
                0x71f0, 0x7284,
                0x72f0, 0x7384,
                0x73f0, 0x7450,
                0x7500, 0x7530,
                0x7600, 0x761c,
                0x7680, 0x76cc,
                0x7700, 0x7798,
                0x77c0, 0x77fc,
                0x7900, 0x79fc,
                0x7b00, 0x7c38,
                0x7d00, 0x7efc,
                0x8dc0, 0x8e1c,
                0x8e30, 0x8e78,
                0x8ea0, 0x8f6c,
                0x8fc0, 0x9074,
                0x90fc, 0x90fc,
                0x9400, 0x9458,
                0x9600, 0x96bc,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0x9fec,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0xea7c,
                0xf000, 0x11190,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x1924c,
                0x193f8, 0x19474,
                0x19490, 0x194f8,
                0x19800, 0x19f30,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e28c,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e68c,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea8c,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee8c,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f28c,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f68c,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa8c,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe8c,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x20000, 0x2002c,
                0x20100, 0x2013c,
                0x20190, 0x201c8,
                0x20200, 0x20318,
                0x20400, 0x20528,
                0x20540, 0x20614,
                0x21000, 0x21040,
                0x2104c, 0x21060,
                0x210c0, 0x210ec,
                0x21200, 0x21268,
                0x21270, 0x21284,
                0x212fc, 0x21388,
                0x21400, 0x21404,
                0x21500, 0x21518,
                0x2152c, 0x2153c,
                0x21550, 0x21554,
                0x21600, 0x21600,
                0x21608, 0x21628,
                0x21630, 0x2163c,
                0x21700, 0x2171c,
                0x21780, 0x2178c,
                0x21800, 0x21c38,
                0x21c80, 0x21d7c,
                0x21e00, 0x21e04,
                0x22000, 0x2202c,
                0x22100, 0x2213c,
                0x22190, 0x221c8,
                0x22200, 0x22318,
                0x22400, 0x22528,
                0x22540, 0x22614,
                0x23000, 0x23040,
                0x2304c, 0x23060,
                0x230c0, 0x230ec,
                0x23200, 0x23268,
                0x23270, 0x23284,
                0x232fc, 0x23388,
                0x23400, 0x23404,
                0x23500, 0x23518,
                0x2352c, 0x2353c,
                0x23550, 0x23554,
                0x23600, 0x23600,
                0x23608, 0x23628,
                0x23630, 0x2363c,
                0x23700, 0x2371c,
                0x23780, 0x2378c,
                0x23800, 0x23c38,
                0x23c80, 0x23d7c,
                0x23e00, 0x23e04,
                0x24000, 0x2402c,
                0x24100, 0x2413c,
                0x24190, 0x241c8,
                0x24200, 0x24318,
                0x24400, 0x24528,
                0x24540, 0x24614,
                0x25000, 0x25040,
                0x2504c, 0x25060,
                0x250c0, 0x250ec,
                0x25200, 0x25268,
                0x25270, 0x25284,
                0x252fc, 0x25388,
                0x25400, 0x25404,
                0x25500, 0x25518,
                0x2552c, 0x2553c,
                0x25550, 0x25554,
                0x25600, 0x25600,
                0x25608, 0x25628,
                0x25630, 0x2563c,
                0x25700, 0x2571c,
                0x25780, 0x2578c,
                0x25800, 0x25c38,
                0x25c80, 0x25d7c,
                0x25e00, 0x25e04,
                0x26000, 0x2602c,
                0x26100, 0x2613c,
                0x26190, 0x261c8,
                0x26200, 0x26318,
                0x26400, 0x26528,
                0x26540, 0x26614,
                0x27000, 0x27040,
                0x2704c, 0x27060,
                0x270c0, 0x270ec,
                0x27200, 0x27268,
                0x27270, 0x27284,
                0x272fc, 0x27388,
                0x27400, 0x27404,
                0x27500, 0x27518,
                0x2752c, 0x2753c,
                0x27550, 0x27554,
                0x27600, 0x27600,
                0x27608, 0x27628,
                0x27630, 0x2763c,
                0x27700, 0x2771c,
                0x27780, 0x2778c,
                0x27800, 0x27c38,
                0x27c80, 0x27d7c,
                0x27e00, 0x27e04
        };

        int i;
        struct adapter *ap = netdev2adap(dev);

        regs->version = mk_adap_vers(ap);

        memset(buf, 0, T4_REGMAP_SIZE);
        for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
                reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_cfg.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
        return 0;
}

static int identify_port(struct net_device *dev,
                         enum ethtool_phys_id_state state)
{
        unsigned int val;
        struct adapter *adap = netdev2adap(dev);

        if (state == ETHTOOL_ID_ACTIVE)
                val = 0xffff;
        else if (state == ETHTOOL_ID_INACTIVE)
                val = 0;
        else
                return -EINVAL;

        return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}

static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
        unsigned int v = 0;

        if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
            type == FW_PORT_TYPE_BT_XAUI) {
                v |= SUPPORTED_TP;
                if (caps & FW_PORT_CAP_SPEED_100M)
                        v |= SUPPORTED_100baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseT_Full;
        } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
                v |= SUPPORTED_Backplane;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseKX_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseKX4_Full;
        } else if (type == FW_PORT_TYPE_KR)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
        else if (type == FW_PORT_TYPE_BP_AP)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
        else if (type == FW_PORT_TYPE_BP4_AP)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
                     SUPPORTED_10000baseKX4_Full;
        else if (type == FW_PORT_TYPE_FIBER_XFI ||
                 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
                v |= SUPPORTED_FIBRE;

        if (caps & FW_PORT_CAP_ANEG)
                v |= SUPPORTED_Autoneg;
        return v;
}

static unsigned int to_fw_linkcaps(unsigned int caps)
{
        unsigned int v = 0;

        if (caps & ADVERTISED_100baseT_Full)
                v |= FW_PORT_CAP_SPEED_100M;
        if (caps & ADVERTISED_1000baseT_Full)
                v |= FW_PORT_CAP_SPEED_1G;
        if (caps & ADVERTISED_10000baseT_Full)
                v |= FW_PORT_CAP_SPEED_10G;
        return v;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        const struct port_info *p = netdev_priv(dev);

        if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
            p->port_type == FW_PORT_TYPE_BT_XFI ||
            p->port_type == FW_PORT_TYPE_BT_XAUI)
                cmd->port = PORT_TP;
        else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
                 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
                cmd->port = PORT_FIBRE;
        else if (p->port_type == FW_PORT_TYPE_SFP) {
                if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
                    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
                        cmd->port = PORT_DA;
                else
                        cmd->port = PORT_FIBRE;
        } else
                cmd->port = PORT_OTHER;

        if (p->mdio_addr >= 0) {
                cmd->phy_address = p->mdio_addr;
                cmd->transceiver = XCVR_EXTERNAL;
                cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
                        MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
        } else {
                cmd->phy_address = 0;  /* not really, but no better option */
                cmd->transceiver = XCVR_INTERNAL;
                cmd->mdio_support = 0;
        }

        cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
        cmd->advertising = from_fw_linkcaps(p->port_type,
                                            p->link_cfg.advertising);
        cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
        cmd->duplex = DUPLEX_FULL;
        cmd->autoneg = p->link_cfg.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

static unsigned int speed_to_caps(int speed)
{
        if (speed == SPEED_100)
                return FW_PORT_CAP_SPEED_100M;
        if (speed == SPEED_1000)
                return FW_PORT_CAP_SPEED_1G;
        if (speed == SPEED_10000)
                return FW_PORT_CAP_SPEED_10G;
        return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        unsigned int cap;
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;

        if (cmd->duplex != DUPLEX_FULL)  /* only full-duplex supported */
                return -EINVAL;

        if (!(lc->supported & FW_PORT_CAP_ANEG)) {
                /*
                 * PHY offers a single speed.  See if that's what's
                 * being requested.
                 */
                if (cmd->autoneg == AUTONEG_DISABLE &&
                    (lc->supported & speed_to_caps(cmd->speed)))
                        return 0;
                return -EINVAL;
        }

        if (cmd->autoneg == AUTONEG_DISABLE) {
                cap = speed_to_caps(cmd->speed);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
                    cmd->speed == SPEED_10000)
                        return -EINVAL;
                lc->requested_speed = cap;
                lc->advertising = 0;
        } else {
                cap = to_fw_linkcaps(cmd->advertising);
                if (!(lc->supported & cap))
                        return -EINVAL;
                lc->requested_speed = 0;
                lc->advertising = cap | FW_PORT_CAP_ANEG;
        }
        lc->autoneg = cmd->autoneg;

        if (netif_running(dev))
                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
                                     lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & FW_PORT_CAP_ANEG)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (netif_running(dev))
                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
                                     lc);
        return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct sge *s = &pi->adapter->sge;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
        e->rx_jumbo_max_pending = 0;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
        e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
        e->rx_jumbo_pending = 0;
        e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        int i;
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;

        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        for (i = 0; i < pi->nqsets; ++i) {
                s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
                s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
                s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
        }
        return 0;
}

static int closest_timer(const struct sge *s, int time)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

static int closest_thres(const struct sge *s, int thres)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
                               const struct sge_rspq *q)
{
        unsigned int idx = q->intr_params >> 1;

        return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}

/**
 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
 * @adap: the adapter
 * @q: the Rx queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an Rx queue's interrupt hold-off time and packet count.  At least
 * one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
                               unsigned int us, unsigned int cnt)
{
        if ((us | cnt) == 0)
                cnt = 1;

        if (cnt) {
                int err;
                u32 v, new_idx;

                new_idx = closest_thres(&adap->sge, cnt);
                if (q->desc && q->pktcnt_idx != new_idx) {
                        /* the queue has already been created, update it */
                        v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            FW_PARAMS_PARAM_YZ(q->cntxt_id);
                        err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
                                            &new_idx);
                        if (err)
                                return err;
                }
                q->pktcnt_idx = new_idx;
        }

        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
        q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
        return 0;
}
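
/*
 * Encoding of intr_params as used above: the hold-off timer index sits in
 * the bits above bit 0 (hence the ">> 1" in qtimer_val()), and bit 0
 * (QINTR_CNT_EN) enables the packet-count threshold.
 */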

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
                        c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct adapter *adap = pi->adapter;
        const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

        c->rx_coalesce_usecs = qtimer_val(adap, rq);
        c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
                adap->sge.counter_val[rq->pktcnt_idx] : 0;
        return 0;
}

/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual.  The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
        fn *= sz;
        if (phys_addr < 1024)
                return phys_addr + (31 << 10);
        if (phys_addr < 1024 + fn)
                return 31744 - fn + phys_addr - 1024;
        if (phys_addr < EEPROMSIZE)
                return phys_addr - 1024 - fn;
        return -EINVAL;
}
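
/*
 * Worked example of the mapping (assuming fn = 2 and sz = 1024, so A = 2048):
 * physical 0..1023 maps to virtual 31744..32767, physical 1024..3071 maps to
 * virtual 29696..31743, and physical 3072 and up maps down to virtual 0
 * onwards.
 */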

/*
 * The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->fn > 0) {
		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	ret = t4_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
	return ret;
}

#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = netdev2adap(dev)->wol;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}

static int cxgb_set_features(struct net_device *dev, u32 features)
{
	const struct port_info *pi = netdev_priv(dev);
	u32 changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_RX;
	return err;
}

static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
{
	const struct port_info *pi = netdev_priv(dev);
	unsigned int n = min_t(unsigned int, p->size, pi->rss_size);

	p->size = pi->rss_size;
	while (n--)
		p->ring_index[n] = pi->rss[n];
	return 0;
}

static int set_rss_table(struct net_device *dev,
			 const struct ethtool_rxfh_indir *p)
{
	unsigned int i;
	struct port_info *pi = netdev_priv(dev);

	if (p->size != pi->rss_size)
		return -EINVAL;
	for (i = 0; i < p->size; i++)
		if (p->ring_index[i] >= pi->nqsets)
			return -EINVAL;
	for (i = 0; i < p->size; i++)
		pi->rss[i] = p->ring_index[i];
	if (pi->adapter->flags & FULL_INIT_DONE)
		return write_rss(pi, pi->rss);
	return 0;
}

static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     void *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}

static struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.set_eeprom        = set_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.set_phys_id       = identify_port,
	.nway_reset        = restart_autoneg,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_wol           = get_wol,
	.set_wol           = set_wol,
	.get_rxnfc         = get_rxnfc,
	.get_rxfh_indir    = get_rss_table,
	.set_rxfh_indir    = set_rss_table,
	.flash_device      = set_flash,
};

/*
 * debugfs support
 */

static int mem_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file->f_path.dentry->d_inode->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct adapter *adap = file->private_data - mem;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];

		if (mem == MEM_MC)
			ret = t4_mc_read(adap, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mem, pos, data, NULL);
		if (ret)
			return ret;

		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	count = pos - *ppos;
	*ppos = pos;
	return count;
}

static const struct file_operations mem_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = mem_open,
	.read    = mem_read,
	.llseek  = default_llseek,
};

static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
				      unsigned int idx, unsigned int size_mb)
{
	struct dentry *de;

	de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
				 (void *)adap + idx, &mem_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = size_mb << 20;
}
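
/*
 * Note on the pointer tagging used above: add_debugfs_mem() stores
 * (void *)adap + idx as the debugfs file's private data, and mem_read()
 * recovers the memory index with "& 3" and the adapter pointer by
 * subtracting the index back out.  This works because struct adapter is
 * at least 4-byte aligned, so the low two bits of its address are free
 * to carry the MEM_* index.
 */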

static int __devinit setup_debugfs(struct adapter *adap)
{
	int i;

	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
	if (i & EDRAM0_ENABLE)
		add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
	if (i & EDRAM1_ENABLE)
		add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
	if (i & EXT_MEM_ENABLE)
		add_debugfs_mem(adap, "mc", MEM_MC,
			EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
	if (adap->l2t)
		debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
				    &t4_l2t_fops);
	return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = p - t->atid_tab;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/*
 * Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	stid -= t->stid_base;
	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 2);
	t->stid_tab[stid].data = NULL;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		schedule_work(&adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
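
/*
 * The deferral list above threads pending releases through the tid_tab
 * slots themselves: each entry's slot stores a pointer to the previous
 * list head, with the Tx channel ORed into its two low-order bits (slot
 * addresses are pointer-aligned, so those bits are otherwise zero).
 * process_tid_release_list() below peels the channel back off with
 * "(uintptr_t)p & 3" before following the pointer.
 */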

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	void *old;
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	old = t->tid_tab[tid];
	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		t->tid_tab[tid] = NULL;
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
	if (old)
		atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int natids = t->natids;

	size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       BITS_TO_LONGS(t->nstids) * sizeof(long);
	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids);
	return 0;
}
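
/*
 * For reference, the single allocation made by tid_init() is carved up as
 * follows (sizes in array elements, not bytes):
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids] | stid_bmap
 *
 * so freeing t->tid_tab releases all four tables at once.
 */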

/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
	return t4_mgmt_tx(adap, skb);
}
EXPORT_SYMBOL(cxgb4_create_server);
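
/*
 * Minimal calling sketch for a ULD (illustrative only; the tid_info
 * pointer and the Rx queue id would normally come from the lld_info the
 * ULD was handed at attach time):
 *
 *	int stid = cxgb4_alloc_stid(tids, PF_INET, ctx);
 *	if (stid >= 0)
 *		ret = cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
 *					  htons(80), rxq_id);
 */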

/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
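
/*
 * Usage sketch (illustrative): select the HW MTU for a 1500-byte path
 * using the table that adap_init0() reads into adap->params.mtus:
 *
 *	unsigned int idx;
 *	unsigned int hw_mtu = cxgb4_best_mtu(adap->params.mtus, 1500, &idx);
 *
 * @idx may be NULL when the caller only needs the MTU value itself.
 */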

/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
		     HPZ3(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	lli.pdev = adap->pdev;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.nrxq = adap->sge.rdmaqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.ofld_rxq;
		lli.nrxq = adap->sge.ofldqsets;
	}
	lli.ntxq = adap->sge.ofldqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.rev;
	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
	lli.fw_vers = adap->params.fw_vers;

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}

static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}

static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}

/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
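
/*
 * Registration sketch for a hypothetical ULD module (only .add and
 * .state_change are shown, since those are the callbacks this file
 * itself invokes; a real cxgb4_uld_info fills in the remaining fields):
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.add          = my_uld_add,
 *		.state_change = my_uld_state_change,
 *	};
 *
 *	ret = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *
 * The .add callback is invoked once per existing adapter while uld_mutex
 * is held.
 */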

/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}

static void cxgb_down(struct adapter *adapter)
{
	t4_intr_disable(adapter);
	cancel_work_sync(&adapter->tid_release_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	if (adapter->flags & USING_MSIX) {
		free_msix_queue_irqs(adapter);
		free_irq(adapter->msix_info[0].vec, adapter);
	} else
		free_irq(adapter->pdev->irq, adapter);
	quiesce_rx(adapter);
	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}

static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	spin_lock(&adapter->stats_lock);
	t4_get_port_stats(adapter, p->tx_chan, &stats);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->fn;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
		return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif

static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
};

void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}

static void setup_memwin(struct adapter *adap)
{
	u32 bar0;

	bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
		     (bar0 + MEMWIN0_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
		     (bar0 + MEMWIN1_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
		     (bar0 + MEMWIN2_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
	if (adap->vres.ocq.size) {
		unsigned int start, sz_kb;

		start = pci_resource_start(adap->pdev, 2) +
			OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
	}
}

static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST | FW_CMD_READ);
	c->retval_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	/* select capabilities we'll be using */
	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
		return -EINVAL;	/* was "return ret", which is 0 here */
	}
	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->fn,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
	v = t4_read_reg(adap, TP_PIO_DATA);
	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);

	/* get basic stuff going */
	return t4_early_init(adap, adap->fn);
}

/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd c;

	ret = t4_check_fw_version(adap);
	if (ret == -EINVAL || ret > 0) {
		if (upgrade_fw(adap) >= 0)             /* recache FW version */
			ret = t4_check_fw_version(adap);
	}
	if (ret < 0)
		return ret;

	/* contact FW, request master */
	ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}

	/* reset device */
	ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
	if (ret < 0)
		goto bye;

	for (v = 0; v < SGE_NTIMERS - 1; v++)
		adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
	adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
	adap->sge.counter_val[0] = 1;
	for (v = 1; v < SGE_NCOUNTERS; v++)
		adap->sge.counter_val[v] = min(intr_cnt[v - 1],
					       THRESHOLD_3_MASK);
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

	params[0] = FW_PARAM_DEV(CCLK);
	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
	if (ret < 0)
		goto bye;
	adap->params.vpd.cclk = val[0];

	ret = adap_init1(adap, &c);
	if (ret < 0)
		goto bye;

#define FW_PARAM_PFVF(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
	 FW_PARAMS_PARAM_Y(adap->fn))

	params[0] = FW_PARAM_DEV(PORTVEC);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	params[6] = FW_PARAM_PFVF(EQ_START);
	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
	if (ret < 0)
		goto bye;
	port_vec = val[0];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];
	adap->sge.egr_start = val[6];

	if (c.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];
		adap->params.offload = 1;
	}
	if (c.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;
	}
	if (c.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;
	adap->flags |= FW_OK;

	/* These are finalized by FW initialization, load their values now */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

#ifdef CONFIG_PCI_IOV
	/*
	 * Provision resource limits for Virtual Functions.  We currently
	 * grant them all the same static resource limits except for the Port
	 * Access Rights Mask which we're assigning based on the PF.  All of
	 * the static provisioning stuff for both the PF and VF really needs
	 * to be managed in a persistent manner for each device which the
	 * firmware controls.
	 */
	{
		int pf, vf;

		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
			if (num_vf[pf] <= 0)
				continue;

			/* VF numbering starts at 1! */
			for (vf = 1; vf <= num_vf[pf]; vf++) {
				ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
						  VFRES_NEQ, VFRES_NETHCTRL,
						  VFRES_NIQFLINT, VFRES_NIQ,
						  VFRES_TC, VFRES_NVI,
						  FW_PFVF_CMD_CMASK_MASK,
						  pfvfres_pmask(adap, pf, vf),
						  VFRES_NEXACTF,
						  VFRES_R_CAPS, VFRES_WX_CAPS);
				if (ret < 0)
					dev_warn(adap->pdev_dev, "failed to "
						 "provision pf/vf=%d/%d; "
						 "err=%d\n", pf, vf, ret);
			}
		}
	}
#endif

	setup_memwin(adap);
	return 0;

	/*
	 * If a command timed out or failed with EIO FW does not operate within
	 * its spec or something catastrophic happened to HW/FW, stop issuing
	 * commands.
	 */
bye:	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->fn);
	return ret;
}

/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	pci_disable_device(pdev);
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};

static inline bool is_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}

static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}
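
/*
 * Example of the parameters used below: init_rspq(&s->fw_evtq, 6, 0, 512, 64)
 * configures the firmware event queue with holdoff timer index 6 and packet
 * count threshold index 0 (counter 0 is programmed to 1 in adap_init0(), and
 * any index below SGE_NCOUNTERS also sets QINTR_CNT_EN), using 512 entries
 * of 64 bytes each.
 */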

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void __devinit cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > num_online_cpus())
		q10g = num_online_cpus();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}
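
/*
 * Worked example of the queue-set math above (hypothetical box: two 10G
 * ports, no 1G ports, 8 online CPUs, and assuming MAX_ETH_QSETS / 2 >= 8):
 * n10g = 2, so q10g = MAX_ETH_QSETS / 2, then capped at 8 by
 * num_online_cpus(), giving each port 8 queue sets and s->ethqsets = 16.
 * A 1G port on the same adapter would take exactly one queue set.
 */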
3357
3358/*
3359 * Reduce the number of Ethernet queues across all ports to at most n.
3360 * n provides at least one queue per port.
3361 */
3362static void __devinit reduce_ethqs(struct adapter *adap, int n)
3363{
3364 int i;
3365 struct port_info *pi;
3366
3367 while (n < adap->sge.ethqsets)
3368 for_each_port(adap, i) {
3369 pi = adap2pinfo(adap, i);
3370 if (pi->nqsets > 1) {
3371 pi->nqsets--;
3372 adap->sge.ethqsets--;
3373 if (adap->sge.ethqsets <= n)
3374 break;
3375 }
3376 }
3377
3378 n = 0;
3379 for_each_port(adap, i) {
3380 pi = adap2pinfo(adap, i);
3381 pi->first_qset = n;
3382 n += pi->nqsets;
3383 }
3384}
3385
3386/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
3387#define EXTRA_VECS 2
3388
3389static int __devinit enable_msix(struct adapter *adap)
3390{
3391 int ofld_need = 0;
3392 int i, err, want, need;
3393 struct sge *s = &adap->sge;
3394 unsigned int nchan = adap->params.nports;
3395 struct msix_entry entries[MAX_INGQ + 1];
3396
3397 for (i = 0; i < ARRAY_SIZE(entries); ++i)
3398 entries[i].entry = i;
3399
3400 want = s->max_ethqsets + EXTRA_VECS;
3401 if (is_offload(adap)) {
3402 want += s->rdmaqs + s->ofldqsets;
3403 /* need nchan for each possible ULD */
3404 ofld_need = 2 * nchan;
3405 }
3406 need = adap->params.nports + EXTRA_VECS + ofld_need;
3407
	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
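		/*
		 * Illustrative arithmetic: with the NIC queues settled,
		 * subtracting (ofld_need - nchan) reserves the nchan RDMA
		 * vectors, and the iSCSI share is then rounded down to a
		 * multiple of nchan so every channel is served equally.
		 */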
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;	/* round down */
		}
		for (i = 0; i < want; ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(adap->pdev_dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}

#undef EXTRA_VECS

static int __devinit init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
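		/* Fill the RSS table round-robin over the port's Rx queues */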
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;
	}
	return 0;
}

static void __devinit print_port_info(const struct net_device *dev)
{
	static const char *base[] = {
		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
	};

	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
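	/* Drop the trailing '/' before appending the BASE- suffix */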
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", base[pi->port_type]);

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id, adap->params.rev, buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, E/C: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.ec);
}

static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	u16 v;
	int pos;

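	/*
	 * Set the Enable Relaxed Ordering bit in the PCIe Device Control
	 * register, allowing the device's upstream writes to be reordered
	 * for better throughput.
	 */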
	pos = pci_pcie_cap(dev);
	if (pos > 0) {
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
		v |= PCI_EXP_DEVCTL_RELAX_EN;
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
	}
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)

static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int func, i, err;
	struct port_info *pi;
	unsigned int highdma = 0;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
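	/*
	 * Only the PF named in the PCI device table entry gets the full
	 * probe; any other PF jumps straight to SR-IOV provisioning below.
	 */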
	if (func != ent->driver_data) {
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = NETIF_F_HIGHDMA;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar;
	err = adap_init0(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->features |= netdev->hw_features | highdma;
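		/* highdma (set from the DMA-mask probe above) lands only in
		 * features: it is not a user-toggleable hw_feature */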
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now; they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
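	/* msi > 1 tries MSI-X first and falls back to MSI; msi == 0 leaves
	 * the device on legacy INTx */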
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name        = KBUILD_MODNAME,
	.id_table    = cxgb4_pci_tbl,
	.probe       = init_one,
	.remove      = __devexit_p(remove_one),
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional; just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warning("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);	/* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);