blob: 5bf490a781aaad9de63b772d5202be5c6fe4bdba [file] [log] [blame]
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
Anish Bhattce100b8b2014-06-19 21:37:15 -07004 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
Jiri Pirko01789342011-08-16 06:29:00 +000044#include <linux/if.h>
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +000045#include <linux/if_vlan.h>
46#include <linux/init.h>
47#include <linux/log2.h>
48#include <linux/mdio.h>
49#include <linux/module.h>
50#include <linux/moduleparam.h>
51#include <linux/mutex.h>
52#include <linux/netdevice.h>
53#include <linux/pci.h>
54#include <linux/aer.h>
55#include <linux/rtnetlink.h>
56#include <linux/sched.h>
57#include <linux/seq_file.h>
58#include <linux/sockios.h>
59#include <linux/vmalloc.h>
60#include <linux/workqueue.h>
61#include <net/neighbour.h>
62#include <net/netevent.h>
Vipul Pandya01bcca62013-07-04 16:10:46 +053063#include <net/addrconf.h>
David S. Miller1ef80192014-11-10 13:27:49 -050064#include <net/bonding.h>
Anish Bhattb5a02f52015-01-14 15:17:34 -080065#include <net/addrconf.h>
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +000066#include <asm/uaccess.h>
67
68#include "cxgb4.h"
69#include "t4_regs.h"
Hariprasad Shenaif612b812015-01-05 16:30:43 +053070#include "t4_values.h"
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +000071#include "t4_msg.h"
72#include "t4fw_api.h"
Hariprasad Shenaicd6c2f12015-01-27 20:12:52 +053073#include "t4fw_version.h"
Anish Bhatt688848b2014-06-19 21:37:13 -070074#include "cxgb4_dcb.h"
Hariprasad Shenaifd88b312014-11-07 09:35:23 +053075#include "cxgb4_debugfs.h"
Anish Bhattb5a02f52015-01-14 15:17:34 -080076#include "clip_tbl.h"
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +000077#include "l2t.h"
78
/* Make sure the driver's own version string wins even if something pulled in
 * earlier already defined DRV_VERSION.
 */
#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/* Upper/lower bounds on SGE queue sizes.  These are validation limits for
 * queue sizing requests (presumably from ethtool/configuration paths not
 * shown in this chunk -- confirm against callers).
 */
enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};
95
/* Host shadow copy of an ingress filter entry.  This is in host native format
 * and doesn't match the ordering, bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and to save memory in the
 * case where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for the filter. */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};
119
/* Default netif message level: driver/probe/link lifecycle plus error paths. */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
	{PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
	{ 0, } \
	}

/* Expands the macros above into the actual cxgb4_pci_tbl[] definition. */
#include "t4_pci_id_tbl.h"

/* Firmware images and default configuration files requested at probe time. */
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000156
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
		 " parameter");

/* Default netif message-enable bitmap; see DFLT_MSG_ENABLE above. */
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of
 * these upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds, deprecated parameter");

/* Packet-count thresholds for interrupt coalescing. */
static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters, "
		 "deprecated parameter");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
		 "deprecated parameter");

/* Configure the number of PCI-E Virtual Functions which are to be
 * instantiated on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel provided function (select_queue=0) or
 * user cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

/* Global compressed-filter (TP VLAN priority map) configuration. */
static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
		 "deprecated parameter");
Vipul Pandyaf2b7e782012-12-10 09:30:52 +0000262
/* debugfs root for all cxgb4 adapters (created at module init elsewhere). */
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
/* Registered Upper-Layer Drivers (RDMA/iSCSI) and their display names. */
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
272
273static void link_report(struct net_device *dev)
274{
275 if (!netif_carrier_ok(dev))
276 netdev_info(dev, "link down\n");
277 else {
278 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
279
280 const char *s = "10Mbps";
281 const struct port_info *p = netdev_priv(dev);
282
283 switch (p->link_cfg.speed) {
Ben Hutchingse8b39012014-02-23 00:03:24 +0000284 case 10000:
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000285 s = "10Gbps";
286 break;
Ben Hutchingse8b39012014-02-23 00:03:24 +0000287 case 1000:
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000288 s = "1000Mbps";
289 break;
Ben Hutchingse8b39012014-02-23 00:03:24 +0000290 case 100:
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000291 s = "100Mbps";
292 break;
Ben Hutchingse8b39012014-02-23 00:03:24 +0000293 case 40000:
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +0530294 s = "40Gbps";
295 break;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000296 }
297
298 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
299 fc[p->link_cfg.fc]);
300 }
301}
302
Anish Bhatt688848b2014-06-19 21:37:13 -0700303#ifdef CONFIG_CHELSIO_T4_DCB
304/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
305static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
306{
307 struct port_info *pi = netdev_priv(dev);
308 struct adapter *adap = pi->adapter;
309 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
310 int i;
311
312 /* We use a simple mapping of Port TX Queue Index to DCB
313 * Priority when we're enabling DCB.
314 */
315 for (i = 0; i < pi->nqsets; i++, txq++) {
316 u32 name, value;
317 int err;
318
Hariprasad Shenai51678652014-11-21 12:52:02 +0530319 name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
320 FW_PARAMS_PARAM_X_V(
321 FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
322 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
Anish Bhatt688848b2014-06-19 21:37:13 -0700323 value = enable ? i : 0xffffffff;
324
325 /* Since we can be called while atomic (from "interrupt
326 * level") we need to issue the Set Parameters Commannd
327 * without sleeping (timeout < 0).
328 */
329 err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
330 &name, &value);
331
332 if (err)
333 dev_err(adap->pdev_dev,
334 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
335 enable ? "set" : "unset", pi->port_id, i, -err);
Anish Bhatt10b00462014-08-07 16:14:03 -0700336 else
337 txq->dcb_prio = value;
Anish Bhatt688848b2014-06-19 21:37:13 -0700338 }
339}
340#endif /* CONFIG_CHELSIO_T4_DCB */
341
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000342void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
343{
344 struct net_device *dev = adapter->port[port_id];
345
346 /* Skip changes from disabled ports. */
347 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
348 if (link_stat)
349 netif_carrier_on(dev);
Anish Bhatt688848b2014-06-19 21:37:13 -0700350 else {
351#ifdef CONFIG_CHELSIO_T4_DCB
352 cxgb4_dcb_state_init(dev);
353 dcb_tx_queue_prio_enable(dev, false);
354#endif /* CONFIG_CHELSIO_T4_DCB */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000355 netif_carrier_off(dev);
Anish Bhatt688848b2014-06-19 21:37:13 -0700356 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000357
358 link_report(dev);
359 }
360}
361
362void t4_os_portmod_changed(const struct adapter *adap, int port_id)
363{
364 static const char *mod_str[] = {
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +0000365 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000366 };
367
368 const struct net_device *dev = adap->port[port_id];
369 const struct port_info *pi = netdev_priv(dev);
370
371 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
372 netdev_info(dev, "port module unplugged\n");
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +0000373 else if (pi->mod_type < ARRAY_SIZE(mod_str))
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000374 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
375}
376
377/*
378 * Configure the exact and hash address filters to handle a port's multicast
379 * and secondary unicast MAC addresses.
380 */
381static int set_addr_filters(const struct net_device *dev, bool sleep)
382{
383 u64 mhash = 0;
384 u64 uhash = 0;
385 bool free = true;
386 u16 filt_idx[7];
387 const u8 *addr[7];
388 int ret, naddr = 0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000389 const struct netdev_hw_addr *ha;
390 int uc_cnt = netdev_uc_count(dev);
David S. Miller4a35ecf2010-04-06 23:53:30 -0700391 int mc_cnt = netdev_mc_count(dev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000392 const struct port_info *pi = netdev_priv(dev);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +0000393 unsigned int mb = pi->adapter->fn;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000394
395 /* first do the secondary unicast addresses */
396 netdev_for_each_uc_addr(ha, dev) {
397 addr[naddr++] = ha->addr;
398 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
Dimitris Michailidis060e0c72010-08-02 13:19:21 +0000399 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000400 naddr, addr, filt_idx, &uhash, sleep);
401 if (ret < 0)
402 return ret;
403
404 free = false;
405 naddr = 0;
406 }
407 }
408
409 /* next set up the multicast addresses */
David S. Miller4a35ecf2010-04-06 23:53:30 -0700410 netdev_for_each_mc_addr(ha, dev) {
411 addr[naddr++] = ha->addr;
412 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
Dimitris Michailidis060e0c72010-08-02 13:19:21 +0000413 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000414 naddr, addr, filt_idx, &mhash, sleep);
415 if (ret < 0)
416 return ret;
417
418 free = false;
419 naddr = 0;
420 }
421 }
422
Dimitris Michailidis060e0c72010-08-02 13:19:21 +0000423 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000424 uhash | mhash, sleep);
425}
426
/* Doorbell FIFO interrupt threshold; 10 == 640 entry threshold. */
int dbfifo_int_thresh = 10;
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
438
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000439/*
440 * Set Rx properties of a port, such as promiscruity, address filters, and MTU.
441 * If @mtu is -1 it is left unchanged.
442 */
443static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
444{
445 int ret;
446 struct port_info *pi = netdev_priv(dev);
447
448 ret = set_addr_filters(dev, sleep_ok);
449 if (ret == 0)
Dimitris Michailidis060e0c72010-08-02 13:19:21 +0000450 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000451 (dev->flags & IFF_PROMISC) ? 1 : 0,
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +0000452 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000453 sleep_ok);
454 return ret;
455}
456
457/**
458 * link_start - enable a port
459 * @dev: the port to enable
460 *
461 * Performs the MAC and PHY actions needed to enable a port.
462 */
463static int link_start(struct net_device *dev)
464{
465 int ret;
466 struct port_info *pi = netdev_priv(dev);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +0000467 unsigned int mb = pi->adapter->fn;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000468
469 /*
470 * We do not set address filters and promiscuity here, the stack does
471 * that step explicitly.
472 */
Dimitris Michailidis060e0c72010-08-02 13:19:21 +0000473 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
Patrick McHardyf6469682013-04-19 02:04:27 +0000474 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000475 if (ret == 0) {
Dimitris Michailidis060e0c72010-08-02 13:19:21 +0000476 ret = t4_change_mac(pi->adapter, mb, pi->viid,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000477 pi->xact_addr_filt, dev->dev_addr, true,
Dimitris Michailidisb6bd29e2010-05-18 10:07:11 +0000478 true);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000479 if (ret >= 0) {
480 pi->xact_addr_filt = ret;
481 ret = 0;
482 }
483 }
484 if (ret == 0)
Dimitris Michailidis060e0c72010-08-02 13:19:21 +0000485 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
486 &pi->link_cfg);
Anish Bhatt30f00842014-08-05 16:05:23 -0700487 if (ret == 0) {
488 local_bh_disable();
Anish Bhatt688848b2014-06-19 21:37:13 -0700489 ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
490 true, CXGB4_DCB_ENABLED);
Anish Bhatt30f00842014-08-05 16:05:23 -0700491 local_bh_enable();
492 }
Anish Bhatt688848b2014-06-19 21:37:13 -0700493
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000494 return ret;
495}
496
/* Report whether Data Center Bridging is active on @dev.  Always 0 when DCB
 * support is compiled out.
 */
int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);
	int state;

	if (!pi->dcb.enabled)
		return 0;

	state = pi->dcb.state;
	return state == CXGB4_DCB_STATE_FW_ALLSYNCED ||
	       state == CXGB4_DCB_STATE_HOST;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);
512
513#ifdef CONFIG_CHELSIO_T4_DCB
514/* Handle a Data Center Bridging update message from the firmware. */
515static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
516{
Hariprasad Shenai2b5fb1f2014-11-21 12:52:04 +0530517 int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
Anish Bhatt688848b2014-06-19 21:37:13 -0700518 struct net_device *dev = adap->port[port];
519 int old_dcb_enabled = cxgb4_dcb_enabled(dev);
520 int new_dcb_enabled;
521
522 cxgb4_dcb_handle_fw_update(adap, pcmd);
523 new_dcb_enabled = cxgb4_dcb_enabled(dev);
524
525 /* If the DCB has become enabled or disabled on the port then we're
526 * going to need to set up/tear down DCB Priority parameters for the
527 * TX Queues associated with the port.
528 */
529 if (new_dcb_enabled != old_dcb_enabled)
530 dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
531}
532#endif /* CONFIG_CHELSIO_T4_DCB */
533
Vipul Pandyaf2b7e782012-12-10 09:30:52 +0000534/* Clear a filter and release any of its resources that we own. This also
535 * clears the filter's "pending" status.
536 */
537static void clear_filter(struct adapter *adap, struct filter_entry *f)
538{
539 /* If the new or old filter have loopback rewriteing rules then we'll
540 * need to free any existing Layer Two Table (L2T) entries of the old
541 * filter rule. The firmware will handle freeing up any Source MAC
542 * Table (SMT) entries used for rewriting Source MAC Addresses in
543 * loopback rules.
544 */
545 if (f->l2t)
546 cxgb4_l2t_release(f->l2t);
547
548 /* The zeroing of the filter rule below clears the filter valid,
549 * pending, locked flags, l2t pointer, etc. so it's all we need for
550 * this operation.
551 */
552 memset(f, 0, sizeof(*f));
553}
554
555/* Handle a filter write/deletion reply.
556 */
557static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
558{
559 unsigned int idx = GET_TID(rpl);
560 unsigned int nidx = idx - adap->tids.ftid_base;
561 unsigned int ret;
562 struct filter_entry *f;
563
564 if (idx >= adap->tids.ftid_base && nidx <
565 (adap->tids.nftids + adap->tids.nsftids)) {
566 idx = nidx;
Hariprasad Shenaibdc590b2015-01-08 21:38:16 -0800567 ret = TCB_COOKIE_G(rpl->cookie);
Vipul Pandyaf2b7e782012-12-10 09:30:52 +0000568 f = &adap->tids.ftid_tab[idx];
569
570 if (ret == FW_FILTER_WR_FLT_DELETED) {
571 /* Clear the filter when we get confirmation from the
572 * hardware that the filter has been deleted.
573 */
574 clear_filter(adap, f);
575 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
576 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
577 idx);
578 clear_filter(adap, f);
579 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
580 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
581 f->pending = 0; /* asynchronous setup completed */
582 f->valid = 1;
583 } else {
584 /* Something went wrong. Issue a warning about the
585 * problem and clear everything out.
586 */
587 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
588 idx, ret);
589 clear_filter(adap, f);
590 }
591 }
592}
593
/* Response queue handler for the FW event queue.  Dispatches CPL messages
 * from the firmware: egress-queue restarts, FW4/FW6 port and DCB messages,
 * L2T write replies and filter (SET_TCB) replies.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++; /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		/* Unwrap: the real CPL (with its own RSS header) follows. */
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		/* Ethernet TX queues precede offload TX queues in the sge
		 * struct, so a pointer comparison distinguishes them.
		 */
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev = q->adap->port[port];
			/* Feed the firmware's DCBX enable/disable bit into
			 * the local DCB state machine.
			 */
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS_F)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
678
/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done
 *	by the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;	/* skip the FW4_MSG wrapper + its RSS header */

	/* A non-zero return from the ULD means it could not take the packet
	 * (e.g. out of memory); count it and report the failure upward.
	 */
	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;	/* immediate data, no gather list */
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;	/* asynchronous notification */
	else
		rxq->stats.pkts++;	/* regular packet */
	return 0;
}
711
/* Disable whichever message-signaled interrupt mode (MSI-X or MSI) the
 * adapter is currently using and clear the corresponding flag.
 */
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
722
723/*
724 * Interrupt handler for non-data events used with MSI-X.
725 */
726static irqreturn_t t4_nondata_intr(int irq, void *cookie)
727{
728 struct adapter *adap = cookie;
Hariprasad Shenai0d804332015-01-05 16:30:47 +0530729 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000730
Hariprasad Shenai0d804332015-01-05 16:30:47 +0530731 if (v & PFSW_F) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000732 adap->swintr = 1;
Hariprasad Shenai0d804332015-01-05 16:30:47 +0530733 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000734 }
735 t4_slow_intr_handler(adap);
736 return IRQ_HANDLED;
737}
738
739/*
740 * Name the MSI-X interrupts.
741 */
742static void name_msix_vecs(struct adapter *adap)
743{
Dimitris Michailidisba278162010-12-14 21:36:50 +0000744 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000745
746 /* non-data interrupts */
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +0000747 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000748
749 /* FW events */
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +0000750 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
751 adap->port[0]->name);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000752
753 /* Ethernet queues */
754 for_each_port(adap, j) {
755 struct net_device *d = adap->port[j];
756 const struct port_info *pi = netdev_priv(d);
757
Dimitris Michailidisba278162010-12-14 21:36:50 +0000758 for (i = 0; i < pi->nqsets; i++, msi_idx++)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000759 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
760 d->name, i);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000761 }
762
763 /* offload queues */
Dimitris Michailidisba278162010-12-14 21:36:50 +0000764 for_each_ofldrxq(&adap->sge, i)
765 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +0000766 adap->port[0]->name, i);
Dimitris Michailidisba278162010-12-14 21:36:50 +0000767
768 for_each_rdmarxq(&adap->sge, i)
769 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +0000770 adap->port[0]->name, i);
Hariprasad Shenaicf38be62014-06-06 21:40:42 +0530771
772 for_each_rdmaciq(&adap->sge, i)
773 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
774 adap->port[0]->name, i);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000775}
776
/* Request an MSI-X vector for every SGE response queue: the firmware event
 * queue first, then the Ethernet, offload, RDMA and RDMA CIQ queues, in
 * the same order name_msix_vecs() named them.  On failure, everything
 * requested so far is freed in reverse order.
 * Returns 0 on success or the request_irq() error code.
 */
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
	/* vector 0 is the non-data interrupt, vector 1 the FW event queue;
	 * per-queue vectors start at index 2
	 */
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	/* The loop index variables record how far each stage got; walk
	 * them back, releasing vectors in reverse order of acquisition.
	 */
	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
842
843static void free_msix_queue_irqs(struct adapter *adap)
844{
Vipul Pandya404d9e32012-10-08 02:59:43 +0000845 int i, msi_index = 2;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000846 struct sge *s = &adap->sge;
847
848 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
849 for_each_ethrxq(s, i)
Vipul Pandya404d9e32012-10-08 02:59:43 +0000850 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000851 for_each_ofldrxq(s, i)
Vipul Pandya404d9e32012-10-08 02:59:43 +0000852 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000853 for_each_rdmarxq(s, i)
Vipul Pandya404d9e32012-10-08 02:59:43 +0000854 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
Hariprasad Shenaicf38be62014-06-06 21:40:42 +0530855 for_each_rdmaciq(s, i)
856 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000857}
858
859/**
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000860 * write_rss - write the RSS table for a given port
861 * @pi: the port
862 * @queues: array of queue indices for RSS
863 *
864 * Sets up the portion of the HW RSS table for the port's VI to distribute
865 * packets to the Rx queues in @queues.
866 */
867static int write_rss(const struct port_info *pi, const u16 *queues)
868{
869 u16 *rss;
870 int i, err;
871 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
872
873 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
874 if (!rss)
875 return -ENOMEM;
876
877 /* map the queue indices to queue ids */
878 for (i = 0; i < pi->rss_size; i++, queues++)
879 rss[i] = q[*queues].rspq.abs_id;
880
Dimitris Michailidis060e0c72010-08-02 13:19:21 +0000881 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
882 pi->rss_size, rss, pi->rss_size);
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000883 kfree(rss);
884 return err;
885}
886
887/**
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000888 * setup_rss - configure RSS
889 * @adap: the adapter
890 *
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000891 * Sets up RSS for each port.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000892 */
893static int setup_rss(struct adapter *adap)
894{
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000895 int i, err;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000896
897 for_each_port(adap, i) {
898 const struct port_info *pi = adap2pinfo(adap, i);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000899
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000900 err = write_rss(pi, pi->rss);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000901 if (err)
902 return err;
903 }
904 return 0;
905}
906
907/*
Dimitris Michailidise46dab42010-08-23 17:20:58 +0000908 * Return the channel of the ingress queue with the given qid.
909 */
910static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
911{
912 qid -= p->ingr_start;
913 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
914}
915
916/*
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000917 * Wait until all NAPI handlers are descheduled.
918 */
919static void quiesce_rx(struct adapter *adap)
920{
921 int i;
922
923 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
924 struct sge_rspq *q = adap->sge.ingr_map[i];
925
926 if (q && q->handler)
927 napi_disable(&q->napi);
928 }
929}
930
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		/* Queues with a response handler are NAPI driven. */
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}
951
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 *	On any allocation failure all SGE resources allocated so far are
 *	freed and the error is returned.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		/* Without MSI-X all response queues forward to a single
		 * interrupt queue; a negative msi_idx encodes its abs_id
		 * for t4_sge_alloc_rxq().
		 */
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	/* One Rx response queue (with free list) and one Tx queue per
	 * Ethernet queue set of each port.
	 */
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		/* spread the offload queues evenly over the ports */
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_rdmaciq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmaciq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_ciq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	/* Direct tracing/RSS control traffic to the first Ethernet response
	 * queue; the register address differs between T4 and T5+.
	 */
	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
}
1078
1079/*
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001080 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1081 * The allocated memory is cleared.
1082 */
1083void *t4_alloc_mem(size_t size)
1084{
Joe Perches8be04b92013-06-19 12:15:53 -07001085 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001086
1087 if (!p)
Eric Dumazet89bf67f2010-11-22 00:15:06 +00001088 p = vzalloc(size);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001089 return p;
1090}
1091
/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
	/* kvfree() dispatches to vfree()/kfree() itself (and is a no-op for
	 * NULL), replacing the open-coded is_vmalloc_addr() test.
	 */
	kvfree(addr);
}
1102
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001103/* Send a Work Request to write the filter at a specified index. We construct
1104 * a Firmware Filter Work Request to have the work done and put the indicated
1105 * filter into "pending" mode which will prevent any further actions against
1106 * it till we get a reply from the firmware on the completion status of the
1107 * request.
1108 */
1109static int set_filter_wr(struct adapter *adapter, int fidx)
1110{
1111 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1112 struct sk_buff *skb;
1113 struct fw_filter_wr *fwr;
1114 unsigned int ftid;
1115
1116 /* If the new filter requires loopback Destination MAC and/or VLAN
1117 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1118 * the filter.
1119 */
1120 if (f->fs.newdmac || f->fs.newvlan) {
1121 /* allocate L2T entry for new filter */
1122 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1123 if (f->l2t == NULL)
1124 return -EAGAIN;
1125 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1126 f->fs.eport, f->fs.dmac)) {
1127 cxgb4_l2t_release(f->l2t);
1128 f->l2t = NULL;
1129 return -ENOMEM;
1130 }
1131 }
1132
1133 ftid = adapter->tids.ftid_base + fidx;
1134
1135 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1136 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1137 memset(fwr, 0, sizeof(*fwr));
1138
1139 /* It would be nice to put most of the following in t4_hw.c but most
1140 * of the work is translating the cxgbtool ch_filter_specification
1141 * into the Work Request and the definition of that structure is
1142 * currently in cxgbtool.h which isn't appropriate to pull into the
1143 * common code. We may eventually try to come up with a more neutral
1144 * filter specification structure but for now it's easiest to simply
1145 * put this fairly direct code in line ...
1146 */
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301147 fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
1148 fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001149 fwr->tid_to_iq =
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301150 htonl(FW_FILTER_WR_TID_V(ftid) |
1151 FW_FILTER_WR_RQTYPE_V(f->fs.type) |
1152 FW_FILTER_WR_NOREPLY_V(0) |
1153 FW_FILTER_WR_IQ_V(f->fs.iq));
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001154 fwr->del_filter_to_l2tix =
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301155 htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
1156 FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
1157 FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
1158 FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
1159 FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
1160 FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
1161 FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
1162 FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
1163 FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001164 f->fs.newvlan == VLAN_REWRITE) |
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301165 FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001166 f->fs.newvlan == VLAN_REWRITE) |
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301167 FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
1168 FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
1169 FW_FILTER_WR_PRIO_V(f->fs.prio) |
1170 FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001171 fwr->ethtype = htons(f->fs.val.ethtype);
1172 fwr->ethtypem = htons(f->fs.mask.ethtype);
1173 fwr->frag_to_ovlan_vldm =
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301174 (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
1175 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
1176 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
1177 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
1178 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
1179 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001180 fwr->smac_sel = 0;
1181 fwr->rx_chan_rx_rpl_iq =
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301182 htons(FW_FILTER_WR_RX_CHAN_V(0) |
1183 FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001184 fwr->maci_to_matchtypem =
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301185 htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
1186 FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
1187 FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
1188 FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
1189 FW_FILTER_WR_PORT_V(f->fs.val.iport) |
1190 FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
1191 FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
1192 FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001193 fwr->ptcl = f->fs.val.proto;
1194 fwr->ptclm = f->fs.mask.proto;
1195 fwr->ttyp = f->fs.val.tos;
1196 fwr->ttypm = f->fs.mask.tos;
1197 fwr->ivlan = htons(f->fs.val.ivlan);
1198 fwr->ivlanm = htons(f->fs.mask.ivlan);
1199 fwr->ovlan = htons(f->fs.val.ovlan);
1200 fwr->ovlanm = htons(f->fs.mask.ovlan);
1201 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1202 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1203 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1204 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1205 fwr->lp = htons(f->fs.val.lport);
1206 fwr->lpm = htons(f->fs.mask.lport);
1207 fwr->fp = htons(f->fs.val.fport);
1208 fwr->fpm = htons(f->fs.mask.fport);
1209 if (f->fs.newsmac)
1210 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1211
1212 /* Mark the filter as "pending" and ship off the Filter Work Request.
1213 * When we get the Work Request Reply we'll clear the pending status.
1214 */
1215 f->pending = 1;
1216 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1217 t4_ofld_send(adapter, skb);
1218 return 0;
1219}
1220
1221/* Delete the filter at a specified index.
1222 */
1223static int del_filter_wr(struct adapter *adapter, int fidx)
1224{
1225 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1226 struct sk_buff *skb;
1227 struct fw_filter_wr *fwr;
1228 unsigned int len, ftid;
1229
1230 len = sizeof(*fwr);
1231 ftid = adapter->tids.ftid_base + fidx;
1232
1233 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1234 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1235 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1236
1237 /* Mark the filter as "pending" and ship off the Filter Work Request.
1238 * When we get the Work Request Reply we'll clear the pending status.
1239 */
1240 f->pending = 1;
1241 t4_mgmt_tx(adapter, skb);
1242 return 0;
1243}
1244
/* ndo_select_queue: choose the Tx queue for @skb.  With DCB negotiated the
 * VLAN priority selects the queue; otherwise an optional Rx-queue-affinity
 * mode or the stack's fallback hash is used.
 */
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If a Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev)) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	/* NOTE(review): "select_queue" is a file-scope knob defined outside
	 * this view — presumably a module parameter enabling Rx-affinity
	 * queue selection; confirm against the rest of the file.
	 */
	if (select_queue) {
		/* Keep Tx on the same CPU/queue the flow was received on,
		 * folding the index into the valid queue range.
		 */
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}
1286
/* Nonzero if the adapter is configured for protocol offload. */
static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}
1291
1292/*
1293 * Implementation of ethtool operations.
1294 */
1295
/* ethtool: report the driver's current message-enable bitmap. */
static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}
1300
/* ethtool: set the driver's message-enable bitmap. */
static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}
1305
/* Names reported via ethtool -S.  The leading entries mirror struct
 * port_stats, the middle block mirrors struct queue_port_stats, and the
 * final two are the SGE write-coalescing counters (zero on T4 — see
 * get_stats()).  Keep the ordering in sync with those structures.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxBroadcastFrames  ",
	"TxMulticastFrames  ",
	"TxUnicastFrames    ",
	"TxErrorFrames      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"TxFramesDropped    ",
	"TxPauseFrames      ",
	"TxPPP0Frames       ",
	"TxPPP1Frames       ",
	"TxPPP2Frames       ",
	"TxPPP3Frames       ",
	"TxPPP4Frames       ",
	"TxPPP5Frames       ",
	"TxPPP6Frames       ",
	"TxPPP7Frames       ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxBroadcastFrames  ",
	"RxMulticastFrames  ",
	"RxUnicastFrames    ",

	"RxFramesTooLong    ",
	"RxJabberErrors     ",
	"RxFCSErrors        ",
	"RxLengthErrors     ",
	"RxSymbolErrors     ",
	"RxRuntFrames       ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"RxPauseFrames      ",
	"RxPPP0Frames       ",
	"RxPPP1Frames       ",
	"RxPPP2Frames       ",
	"RxPPP3Frames       ",
	"RxPPP4Frames       ",
	"RxPPP5Frames       ",
	"RxPPP6Frames       ",
	"RxPPP7Frames       ",

	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc   ",
	"RxBG1FramesTrunc   ",
	"RxBG2FramesTrunc   ",
	"RxBG3FramesTrunc   ",

	"TSO                ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"GROpackets         ",
	"GROmerged          ",
	"WriteCoalSuccess   ",
	"WriteCoalFail      ",
};
1383
1384static int get_sset_count(struct net_device *dev, int sset)
1385{
1386 switch (sset) {
1387 case ETH_SS_STATS:
1388 return ARRAY_SIZE(stats_strings);
1389 default:
1390 return -EOPNOTSUPP;
1391 }
1392}
1393
1394#define T4_REGMAP_SIZE (160 * 1024)
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001395#define T5_REGMAP_SIZE (332 * 1024)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001396
1397static int get_regs_len(struct net_device *dev)
1398{
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001399 struct adapter *adap = netdev2adap(dev);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301400 if (is_t4(adap->params.chip))
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001401 return T4_REGMAP_SIZE;
1402 else
1403 return T5_REGMAP_SIZE;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001404}
1405
/* ethtool: size of the adapter's serial EEPROM. */
static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
1410
/* ethtool: fill in driver name/version, bus info and the firmware/TP
 * microcode versions.
 */
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	/* fw_vers is zero until the firmware version has been read; leave
	 * fw_version empty in that case.
	 */
	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			"%u.%u.%u.%u, TP %u.%u.%u.%u",
			FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
			FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
			FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
			FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
			FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
			FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
			FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
			FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
}
1432
1433static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1434{
1435 if (stringset == ETH_SS_STATS)
1436 memcpy(data, stats_strings, sizeof(stats_strings));
1437}
1438
/*
 * port stats maintained per queue of the port. They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;		/* # of TSO requests */
	u64 tx_csum;		/* # of Tx checksum offloads */
	u64 rx_csum;		/* # of Rx checksums verified */
	u64 vlan_ex;		/* # of VLAN tag extractions */
	u64 vlan_ins;		/* # of VLAN tag insertions */
	u64 gro_pkts;		/* # of GRO packets */
	u64 gro_merged;		/* # of GRO merges */
};
1452
1453static void collect_sge_port_stats(const struct adapter *adap,
1454 const struct port_info *p, struct queue_port_stats *s)
1455{
1456 int i;
1457 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1458 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1459
1460 memset(s, 0, sizeof(*s));
1461 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1462 s->tso += tx->tso;
1463 s->tx_csum += tx->tx_cso;
1464 s->rx_csum += rx->stats.rx_cso;
1465 s->vlan_ex += rx->stats.vlan_ex;
1466 s->vlan_ins += tx->vlan_ins;
Dimitris Michailidis4a6346d2010-05-10 15:58:09 +00001467 s->gro_pkts += rx->stats.lro_pkts;
1468 s->gro_merged += rx->stats.lro_merged;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001469 }
1470}
1471
1472static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1473 u64 *data)
1474{
1475 struct port_info *pi = netdev_priv(dev);
1476 struct adapter *adapter = pi->adapter;
Santosh Rastapur22adfe02013-03-14 05:08:51 +00001477 u32 val1, val2;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001478
1479 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1480
1481 data += sizeof(struct port_stats) / sizeof(u64);
1482 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
Santosh Rastapur22adfe02013-03-14 05:08:51 +00001483 data += sizeof(struct queue_port_stats) / sizeof(u64);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301484 if (!is_t4(adapter->params.chip)) {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05301485 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
1486 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
1487 val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
Santosh Rastapur22adfe02013-03-14 05:08:51 +00001488 *data = val1 - val2;
1489 data++;
1490 *data = val2;
1491 data++;
1492 } else {
1493 memset(data, 0, 2 * sizeof(u64));
1494 *data += 2;
1495 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001496}
1497
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	/* register dump version is currently fixed at 1 */
	return CHELSIO_CHIP_VERSION(ap->params.chip) |
		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
1509
1510static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1511 unsigned int end)
1512{
1513 u32 *p = buf + start;
1514
1515 for ( ; start <= end; start += sizeof(u32))
1516 *p++ = t4_read_reg(ap, start);
1517}
1518
1519static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1520 void *buf)
1521{
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001522 static const unsigned int t4_reg_ranges[] = {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001523 0x1008, 0x1108,
1524 0x1180, 0x11b4,
1525 0x11fc, 0x123c,
1526 0x1300, 0x173c,
1527 0x1800, 0x18fc,
1528 0x3000, 0x30d8,
1529 0x30e0, 0x5924,
1530 0x5960, 0x59d4,
1531 0x5a00, 0x5af8,
1532 0x6000, 0x6098,
1533 0x6100, 0x6150,
1534 0x6200, 0x6208,
1535 0x6240, 0x6248,
1536 0x6280, 0x6338,
1537 0x6370, 0x638c,
1538 0x6400, 0x643c,
1539 0x6500, 0x6524,
1540 0x6a00, 0x6a38,
1541 0x6a60, 0x6a78,
1542 0x6b00, 0x6b84,
1543 0x6bf0, 0x6c84,
1544 0x6cf0, 0x6d84,
1545 0x6df0, 0x6e84,
1546 0x6ef0, 0x6f84,
1547 0x6ff0, 0x7084,
1548 0x70f0, 0x7184,
1549 0x71f0, 0x7284,
1550 0x72f0, 0x7384,
1551 0x73f0, 0x7450,
1552 0x7500, 0x7530,
1553 0x7600, 0x761c,
1554 0x7680, 0x76cc,
1555 0x7700, 0x7798,
1556 0x77c0, 0x77fc,
1557 0x7900, 0x79fc,
1558 0x7b00, 0x7c38,
1559 0x7d00, 0x7efc,
1560 0x8dc0, 0x8e1c,
1561 0x8e30, 0x8e78,
1562 0x8ea0, 0x8f6c,
1563 0x8fc0, 0x9074,
1564 0x90fc, 0x90fc,
1565 0x9400, 0x9458,
1566 0x9600, 0x96bc,
1567 0x9800, 0x9808,
1568 0x9820, 0x983c,
1569 0x9850, 0x9864,
1570 0x9c00, 0x9c6c,
1571 0x9c80, 0x9cec,
1572 0x9d00, 0x9d6c,
1573 0x9d80, 0x9dec,
1574 0x9e00, 0x9e6c,
1575 0x9e80, 0x9eec,
1576 0x9f00, 0x9f6c,
1577 0x9f80, 0x9fec,
1578 0xd004, 0xd03c,
1579 0xdfc0, 0xdfe0,
1580 0xe000, 0xea7c,
Hariprasad Shenai3d9103f2014-09-01 19:54:59 +05301581 0xf000, 0x11110,
1582 0x11118, 0x11190,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001583 0x19040, 0x1906c,
1584 0x19078, 0x19080,
1585 0x1908c, 0x19124,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001586 0x19150, 0x191b0,
1587 0x191d0, 0x191e8,
1588 0x19238, 0x1924c,
1589 0x193f8, 0x19474,
1590 0x19490, 0x194f8,
1591 0x19800, 0x19f30,
1592 0x1a000, 0x1a06c,
1593 0x1a0b0, 0x1a120,
1594 0x1a128, 0x1a138,
1595 0x1a190, 0x1a1c4,
1596 0x1a1fc, 0x1a1fc,
1597 0x1e040, 0x1e04c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001598 0x1e284, 0x1e28c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001599 0x1e2c0, 0x1e2c0,
1600 0x1e2e0, 0x1e2e0,
1601 0x1e300, 0x1e384,
1602 0x1e3c0, 0x1e3c8,
1603 0x1e440, 0x1e44c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001604 0x1e684, 0x1e68c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001605 0x1e6c0, 0x1e6c0,
1606 0x1e6e0, 0x1e6e0,
1607 0x1e700, 0x1e784,
1608 0x1e7c0, 0x1e7c8,
1609 0x1e840, 0x1e84c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001610 0x1ea84, 0x1ea8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001611 0x1eac0, 0x1eac0,
1612 0x1eae0, 0x1eae0,
1613 0x1eb00, 0x1eb84,
1614 0x1ebc0, 0x1ebc8,
1615 0x1ec40, 0x1ec4c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001616 0x1ee84, 0x1ee8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001617 0x1eec0, 0x1eec0,
1618 0x1eee0, 0x1eee0,
1619 0x1ef00, 0x1ef84,
1620 0x1efc0, 0x1efc8,
1621 0x1f040, 0x1f04c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001622 0x1f284, 0x1f28c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001623 0x1f2c0, 0x1f2c0,
1624 0x1f2e0, 0x1f2e0,
1625 0x1f300, 0x1f384,
1626 0x1f3c0, 0x1f3c8,
1627 0x1f440, 0x1f44c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001628 0x1f684, 0x1f68c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001629 0x1f6c0, 0x1f6c0,
1630 0x1f6e0, 0x1f6e0,
1631 0x1f700, 0x1f784,
1632 0x1f7c0, 0x1f7c8,
1633 0x1f840, 0x1f84c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001634 0x1fa84, 0x1fa8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001635 0x1fac0, 0x1fac0,
1636 0x1fae0, 0x1fae0,
1637 0x1fb00, 0x1fb84,
1638 0x1fbc0, 0x1fbc8,
1639 0x1fc40, 0x1fc4c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001640 0x1fe84, 0x1fe8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001641 0x1fec0, 0x1fec0,
1642 0x1fee0, 0x1fee0,
1643 0x1ff00, 0x1ff84,
1644 0x1ffc0, 0x1ffc8,
1645 0x20000, 0x2002c,
1646 0x20100, 0x2013c,
1647 0x20190, 0x201c8,
1648 0x20200, 0x20318,
1649 0x20400, 0x20528,
1650 0x20540, 0x20614,
1651 0x21000, 0x21040,
1652 0x2104c, 0x21060,
1653 0x210c0, 0x210ec,
1654 0x21200, 0x21268,
1655 0x21270, 0x21284,
1656 0x212fc, 0x21388,
1657 0x21400, 0x21404,
1658 0x21500, 0x21518,
1659 0x2152c, 0x2153c,
1660 0x21550, 0x21554,
1661 0x21600, 0x21600,
1662 0x21608, 0x21628,
1663 0x21630, 0x2163c,
1664 0x21700, 0x2171c,
1665 0x21780, 0x2178c,
1666 0x21800, 0x21c38,
1667 0x21c80, 0x21d7c,
1668 0x21e00, 0x21e04,
1669 0x22000, 0x2202c,
1670 0x22100, 0x2213c,
1671 0x22190, 0x221c8,
1672 0x22200, 0x22318,
1673 0x22400, 0x22528,
1674 0x22540, 0x22614,
1675 0x23000, 0x23040,
1676 0x2304c, 0x23060,
1677 0x230c0, 0x230ec,
1678 0x23200, 0x23268,
1679 0x23270, 0x23284,
1680 0x232fc, 0x23388,
1681 0x23400, 0x23404,
1682 0x23500, 0x23518,
1683 0x2352c, 0x2353c,
1684 0x23550, 0x23554,
1685 0x23600, 0x23600,
1686 0x23608, 0x23628,
1687 0x23630, 0x2363c,
1688 0x23700, 0x2371c,
1689 0x23780, 0x2378c,
1690 0x23800, 0x23c38,
1691 0x23c80, 0x23d7c,
1692 0x23e00, 0x23e04,
1693 0x24000, 0x2402c,
1694 0x24100, 0x2413c,
1695 0x24190, 0x241c8,
1696 0x24200, 0x24318,
1697 0x24400, 0x24528,
1698 0x24540, 0x24614,
1699 0x25000, 0x25040,
1700 0x2504c, 0x25060,
1701 0x250c0, 0x250ec,
1702 0x25200, 0x25268,
1703 0x25270, 0x25284,
1704 0x252fc, 0x25388,
1705 0x25400, 0x25404,
1706 0x25500, 0x25518,
1707 0x2552c, 0x2553c,
1708 0x25550, 0x25554,
1709 0x25600, 0x25600,
1710 0x25608, 0x25628,
1711 0x25630, 0x2563c,
1712 0x25700, 0x2571c,
1713 0x25780, 0x2578c,
1714 0x25800, 0x25c38,
1715 0x25c80, 0x25d7c,
1716 0x25e00, 0x25e04,
1717 0x26000, 0x2602c,
1718 0x26100, 0x2613c,
1719 0x26190, 0x261c8,
1720 0x26200, 0x26318,
1721 0x26400, 0x26528,
1722 0x26540, 0x26614,
1723 0x27000, 0x27040,
1724 0x2704c, 0x27060,
1725 0x270c0, 0x270ec,
1726 0x27200, 0x27268,
1727 0x27270, 0x27284,
1728 0x272fc, 0x27388,
1729 0x27400, 0x27404,
1730 0x27500, 0x27518,
1731 0x2752c, 0x2753c,
1732 0x27550, 0x27554,
1733 0x27600, 0x27600,
1734 0x27608, 0x27628,
1735 0x27630, 0x2763c,
1736 0x27700, 0x2771c,
1737 0x27780, 0x2778c,
1738 0x27800, 0x27c38,
1739 0x27c80, 0x27d7c,
1740 0x27e00, 0x27e04
1741 };
1742
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001743 static const unsigned int t5_reg_ranges[] = {
1744 0x1008, 0x1148,
1745 0x1180, 0x11b4,
1746 0x11fc, 0x123c,
1747 0x1280, 0x173c,
1748 0x1800, 0x18fc,
1749 0x3000, 0x3028,
1750 0x3060, 0x30d8,
1751 0x30e0, 0x30fc,
1752 0x3140, 0x357c,
1753 0x35a8, 0x35cc,
1754 0x35ec, 0x35ec,
1755 0x3600, 0x5624,
1756 0x56cc, 0x575c,
1757 0x580c, 0x5814,
1758 0x5890, 0x58bc,
1759 0x5940, 0x59dc,
1760 0x59fc, 0x5a18,
1761 0x5a60, 0x5a9c,
1762 0x5b9c, 0x5bfc,
1763 0x6000, 0x6040,
1764 0x6058, 0x614c,
1765 0x7700, 0x7798,
1766 0x77c0, 0x78fc,
1767 0x7b00, 0x7c54,
1768 0x7d00, 0x7efc,
1769 0x8dc0, 0x8de0,
1770 0x8df8, 0x8e84,
1771 0x8ea0, 0x8f84,
1772 0x8fc0, 0x90f8,
1773 0x9400, 0x9470,
1774 0x9600, 0x96f4,
1775 0x9800, 0x9808,
1776 0x9820, 0x983c,
1777 0x9850, 0x9864,
1778 0x9c00, 0x9c6c,
1779 0x9c80, 0x9cec,
1780 0x9d00, 0x9d6c,
1781 0x9d80, 0x9dec,
1782 0x9e00, 0x9e6c,
1783 0x9e80, 0x9eec,
1784 0x9f00, 0x9f6c,
1785 0x9f80, 0xa020,
1786 0xd004, 0xd03c,
1787 0xdfc0, 0xdfe0,
1788 0xe000, 0x11088,
Hariprasad Shenai3d9103f2014-09-01 19:54:59 +05301789 0x1109c, 0x11110,
1790 0x11118, 0x1117c,
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001791 0x11190, 0x11204,
1792 0x19040, 0x1906c,
1793 0x19078, 0x19080,
1794 0x1908c, 0x19124,
1795 0x19150, 0x191b0,
1796 0x191d0, 0x191e8,
1797 0x19238, 0x19290,
1798 0x193f8, 0x19474,
1799 0x19490, 0x194cc,
1800 0x194f0, 0x194f8,
1801 0x19c00, 0x19c60,
1802 0x19c94, 0x19e10,
1803 0x19e50, 0x19f34,
1804 0x19f40, 0x19f50,
1805 0x19f90, 0x19fe4,
1806 0x1a000, 0x1a06c,
1807 0x1a0b0, 0x1a120,
1808 0x1a128, 0x1a138,
1809 0x1a190, 0x1a1c4,
1810 0x1a1fc, 0x1a1fc,
1811 0x1e008, 0x1e00c,
1812 0x1e040, 0x1e04c,
1813 0x1e284, 0x1e290,
1814 0x1e2c0, 0x1e2c0,
1815 0x1e2e0, 0x1e2e0,
1816 0x1e300, 0x1e384,
1817 0x1e3c0, 0x1e3c8,
1818 0x1e408, 0x1e40c,
1819 0x1e440, 0x1e44c,
1820 0x1e684, 0x1e690,
1821 0x1e6c0, 0x1e6c0,
1822 0x1e6e0, 0x1e6e0,
1823 0x1e700, 0x1e784,
1824 0x1e7c0, 0x1e7c8,
1825 0x1e808, 0x1e80c,
1826 0x1e840, 0x1e84c,
1827 0x1ea84, 0x1ea90,
1828 0x1eac0, 0x1eac0,
1829 0x1eae0, 0x1eae0,
1830 0x1eb00, 0x1eb84,
1831 0x1ebc0, 0x1ebc8,
1832 0x1ec08, 0x1ec0c,
1833 0x1ec40, 0x1ec4c,
1834 0x1ee84, 0x1ee90,
1835 0x1eec0, 0x1eec0,
1836 0x1eee0, 0x1eee0,
1837 0x1ef00, 0x1ef84,
1838 0x1efc0, 0x1efc8,
1839 0x1f008, 0x1f00c,
1840 0x1f040, 0x1f04c,
1841 0x1f284, 0x1f290,
1842 0x1f2c0, 0x1f2c0,
1843 0x1f2e0, 0x1f2e0,
1844 0x1f300, 0x1f384,
1845 0x1f3c0, 0x1f3c8,
1846 0x1f408, 0x1f40c,
1847 0x1f440, 0x1f44c,
1848 0x1f684, 0x1f690,
1849 0x1f6c0, 0x1f6c0,
1850 0x1f6e0, 0x1f6e0,
1851 0x1f700, 0x1f784,
1852 0x1f7c0, 0x1f7c8,
1853 0x1f808, 0x1f80c,
1854 0x1f840, 0x1f84c,
1855 0x1fa84, 0x1fa90,
1856 0x1fac0, 0x1fac0,
1857 0x1fae0, 0x1fae0,
1858 0x1fb00, 0x1fb84,
1859 0x1fbc0, 0x1fbc8,
1860 0x1fc08, 0x1fc0c,
1861 0x1fc40, 0x1fc4c,
1862 0x1fe84, 0x1fe90,
1863 0x1fec0, 0x1fec0,
1864 0x1fee0, 0x1fee0,
1865 0x1ff00, 0x1ff84,
1866 0x1ffc0, 0x1ffc8,
1867 0x30000, 0x30030,
1868 0x30100, 0x30144,
1869 0x30190, 0x301d0,
1870 0x30200, 0x30318,
1871 0x30400, 0x3052c,
1872 0x30540, 0x3061c,
1873 0x30800, 0x30834,
1874 0x308c0, 0x30908,
1875 0x30910, 0x309ac,
1876 0x30a00, 0x30a04,
1877 0x30a0c, 0x30a2c,
1878 0x30a44, 0x30a50,
1879 0x30a74, 0x30c24,
1880 0x30d08, 0x30d14,
1881 0x30d1c, 0x30d20,
1882 0x30d3c, 0x30d50,
1883 0x31200, 0x3120c,
1884 0x31220, 0x31220,
1885 0x31240, 0x31240,
1886 0x31600, 0x31600,
1887 0x31608, 0x3160c,
1888 0x31a00, 0x31a1c,
1889 0x31e04, 0x31e20,
1890 0x31e38, 0x31e3c,
1891 0x31e80, 0x31e80,
1892 0x31e88, 0x31ea8,
1893 0x31eb0, 0x31eb4,
1894 0x31ec8, 0x31ed4,
1895 0x31fb8, 0x32004,
1896 0x32208, 0x3223c,
1897 0x32600, 0x32630,
1898 0x32a00, 0x32abc,
1899 0x32b00, 0x32b70,
1900 0x33000, 0x33048,
1901 0x33060, 0x3309c,
1902 0x330f0, 0x33148,
1903 0x33160, 0x3319c,
1904 0x331f0, 0x332e4,
1905 0x332f8, 0x333e4,
1906 0x333f8, 0x33448,
1907 0x33460, 0x3349c,
1908 0x334f0, 0x33548,
1909 0x33560, 0x3359c,
1910 0x335f0, 0x336e4,
1911 0x336f8, 0x337e4,
1912 0x337f8, 0x337fc,
1913 0x33814, 0x33814,
1914 0x3382c, 0x3382c,
1915 0x33880, 0x3388c,
1916 0x338e8, 0x338ec,
1917 0x33900, 0x33948,
1918 0x33960, 0x3399c,
1919 0x339f0, 0x33ae4,
1920 0x33af8, 0x33b10,
1921 0x33b28, 0x33b28,
1922 0x33b3c, 0x33b50,
1923 0x33bf0, 0x33c10,
1924 0x33c28, 0x33c28,
1925 0x33c3c, 0x33c50,
1926 0x33cf0, 0x33cfc,
1927 0x34000, 0x34030,
1928 0x34100, 0x34144,
1929 0x34190, 0x341d0,
1930 0x34200, 0x34318,
1931 0x34400, 0x3452c,
1932 0x34540, 0x3461c,
1933 0x34800, 0x34834,
1934 0x348c0, 0x34908,
1935 0x34910, 0x349ac,
1936 0x34a00, 0x34a04,
1937 0x34a0c, 0x34a2c,
1938 0x34a44, 0x34a50,
1939 0x34a74, 0x34c24,
1940 0x34d08, 0x34d14,
1941 0x34d1c, 0x34d20,
1942 0x34d3c, 0x34d50,
1943 0x35200, 0x3520c,
1944 0x35220, 0x35220,
1945 0x35240, 0x35240,
1946 0x35600, 0x35600,
1947 0x35608, 0x3560c,
1948 0x35a00, 0x35a1c,
1949 0x35e04, 0x35e20,
1950 0x35e38, 0x35e3c,
1951 0x35e80, 0x35e80,
1952 0x35e88, 0x35ea8,
1953 0x35eb0, 0x35eb4,
1954 0x35ec8, 0x35ed4,
1955 0x35fb8, 0x36004,
1956 0x36208, 0x3623c,
1957 0x36600, 0x36630,
1958 0x36a00, 0x36abc,
1959 0x36b00, 0x36b70,
1960 0x37000, 0x37048,
1961 0x37060, 0x3709c,
1962 0x370f0, 0x37148,
1963 0x37160, 0x3719c,
1964 0x371f0, 0x372e4,
1965 0x372f8, 0x373e4,
1966 0x373f8, 0x37448,
1967 0x37460, 0x3749c,
1968 0x374f0, 0x37548,
1969 0x37560, 0x3759c,
1970 0x375f0, 0x376e4,
1971 0x376f8, 0x377e4,
1972 0x377f8, 0x377fc,
1973 0x37814, 0x37814,
1974 0x3782c, 0x3782c,
1975 0x37880, 0x3788c,
1976 0x378e8, 0x378ec,
1977 0x37900, 0x37948,
1978 0x37960, 0x3799c,
1979 0x379f0, 0x37ae4,
1980 0x37af8, 0x37b10,
1981 0x37b28, 0x37b28,
1982 0x37b3c, 0x37b50,
1983 0x37bf0, 0x37c10,
1984 0x37c28, 0x37c28,
1985 0x37c3c, 0x37c50,
1986 0x37cf0, 0x37cfc,
1987 0x38000, 0x38030,
1988 0x38100, 0x38144,
1989 0x38190, 0x381d0,
1990 0x38200, 0x38318,
1991 0x38400, 0x3852c,
1992 0x38540, 0x3861c,
1993 0x38800, 0x38834,
1994 0x388c0, 0x38908,
1995 0x38910, 0x389ac,
1996 0x38a00, 0x38a04,
1997 0x38a0c, 0x38a2c,
1998 0x38a44, 0x38a50,
1999 0x38a74, 0x38c24,
2000 0x38d08, 0x38d14,
2001 0x38d1c, 0x38d20,
2002 0x38d3c, 0x38d50,
2003 0x39200, 0x3920c,
2004 0x39220, 0x39220,
2005 0x39240, 0x39240,
2006 0x39600, 0x39600,
2007 0x39608, 0x3960c,
2008 0x39a00, 0x39a1c,
2009 0x39e04, 0x39e20,
2010 0x39e38, 0x39e3c,
2011 0x39e80, 0x39e80,
2012 0x39e88, 0x39ea8,
2013 0x39eb0, 0x39eb4,
2014 0x39ec8, 0x39ed4,
2015 0x39fb8, 0x3a004,
2016 0x3a208, 0x3a23c,
2017 0x3a600, 0x3a630,
2018 0x3aa00, 0x3aabc,
2019 0x3ab00, 0x3ab70,
2020 0x3b000, 0x3b048,
2021 0x3b060, 0x3b09c,
2022 0x3b0f0, 0x3b148,
2023 0x3b160, 0x3b19c,
2024 0x3b1f0, 0x3b2e4,
2025 0x3b2f8, 0x3b3e4,
2026 0x3b3f8, 0x3b448,
2027 0x3b460, 0x3b49c,
2028 0x3b4f0, 0x3b548,
2029 0x3b560, 0x3b59c,
2030 0x3b5f0, 0x3b6e4,
2031 0x3b6f8, 0x3b7e4,
2032 0x3b7f8, 0x3b7fc,
2033 0x3b814, 0x3b814,
2034 0x3b82c, 0x3b82c,
2035 0x3b880, 0x3b88c,
2036 0x3b8e8, 0x3b8ec,
2037 0x3b900, 0x3b948,
2038 0x3b960, 0x3b99c,
2039 0x3b9f0, 0x3bae4,
2040 0x3baf8, 0x3bb10,
2041 0x3bb28, 0x3bb28,
2042 0x3bb3c, 0x3bb50,
2043 0x3bbf0, 0x3bc10,
2044 0x3bc28, 0x3bc28,
2045 0x3bc3c, 0x3bc50,
2046 0x3bcf0, 0x3bcfc,
2047 0x3c000, 0x3c030,
2048 0x3c100, 0x3c144,
2049 0x3c190, 0x3c1d0,
2050 0x3c200, 0x3c318,
2051 0x3c400, 0x3c52c,
2052 0x3c540, 0x3c61c,
2053 0x3c800, 0x3c834,
2054 0x3c8c0, 0x3c908,
2055 0x3c910, 0x3c9ac,
2056 0x3ca00, 0x3ca04,
2057 0x3ca0c, 0x3ca2c,
2058 0x3ca44, 0x3ca50,
2059 0x3ca74, 0x3cc24,
2060 0x3cd08, 0x3cd14,
2061 0x3cd1c, 0x3cd20,
2062 0x3cd3c, 0x3cd50,
2063 0x3d200, 0x3d20c,
2064 0x3d220, 0x3d220,
2065 0x3d240, 0x3d240,
2066 0x3d600, 0x3d600,
2067 0x3d608, 0x3d60c,
2068 0x3da00, 0x3da1c,
2069 0x3de04, 0x3de20,
2070 0x3de38, 0x3de3c,
2071 0x3de80, 0x3de80,
2072 0x3de88, 0x3dea8,
2073 0x3deb0, 0x3deb4,
2074 0x3dec8, 0x3ded4,
2075 0x3dfb8, 0x3e004,
2076 0x3e208, 0x3e23c,
2077 0x3e600, 0x3e630,
2078 0x3ea00, 0x3eabc,
2079 0x3eb00, 0x3eb70,
2080 0x3f000, 0x3f048,
2081 0x3f060, 0x3f09c,
2082 0x3f0f0, 0x3f148,
2083 0x3f160, 0x3f19c,
2084 0x3f1f0, 0x3f2e4,
2085 0x3f2f8, 0x3f3e4,
2086 0x3f3f8, 0x3f448,
2087 0x3f460, 0x3f49c,
2088 0x3f4f0, 0x3f548,
2089 0x3f560, 0x3f59c,
2090 0x3f5f0, 0x3f6e4,
2091 0x3f6f8, 0x3f7e4,
2092 0x3f7f8, 0x3f7fc,
2093 0x3f814, 0x3f814,
2094 0x3f82c, 0x3f82c,
2095 0x3f880, 0x3f88c,
2096 0x3f8e8, 0x3f8ec,
2097 0x3f900, 0x3f948,
2098 0x3f960, 0x3f99c,
2099 0x3f9f0, 0x3fae4,
2100 0x3faf8, 0x3fb10,
2101 0x3fb28, 0x3fb28,
2102 0x3fb3c, 0x3fb50,
2103 0x3fbf0, 0x3fc10,
2104 0x3fc28, 0x3fc28,
2105 0x3fc3c, 0x3fc50,
2106 0x3fcf0, 0x3fcfc,
2107 0x40000, 0x4000c,
2108 0x40040, 0x40068,
2109 0x40080, 0x40144,
2110 0x40180, 0x4018c,
2111 0x40200, 0x40298,
2112 0x402ac, 0x4033c,
2113 0x403f8, 0x403fc,
Kumar Sanghvic1f49e32014-02-18 17:56:13 +05302114 0x41304, 0x413c4,
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002115 0x41400, 0x4141c,
2116 0x41480, 0x414d0,
2117 0x44000, 0x44078,
2118 0x440c0, 0x44278,
2119 0x442c0, 0x44478,
2120 0x444c0, 0x44678,
2121 0x446c0, 0x44878,
2122 0x448c0, 0x449fc,
2123 0x45000, 0x45068,
2124 0x45080, 0x45084,
2125 0x450a0, 0x450b0,
2126 0x45200, 0x45268,
2127 0x45280, 0x45284,
2128 0x452a0, 0x452b0,
2129 0x460c0, 0x460e4,
2130 0x47000, 0x4708c,
2131 0x47200, 0x47250,
2132 0x47400, 0x47420,
2133 0x47600, 0x47618,
2134 0x47800, 0x47814,
2135 0x48000, 0x4800c,
2136 0x48040, 0x48068,
2137 0x48080, 0x48144,
2138 0x48180, 0x4818c,
2139 0x48200, 0x48298,
2140 0x482ac, 0x4833c,
2141 0x483f8, 0x483fc,
Kumar Sanghvic1f49e32014-02-18 17:56:13 +05302142 0x49304, 0x493c4,
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002143 0x49400, 0x4941c,
2144 0x49480, 0x494d0,
2145 0x4c000, 0x4c078,
2146 0x4c0c0, 0x4c278,
2147 0x4c2c0, 0x4c478,
2148 0x4c4c0, 0x4c678,
2149 0x4c6c0, 0x4c878,
2150 0x4c8c0, 0x4c9fc,
2151 0x4d000, 0x4d068,
2152 0x4d080, 0x4d084,
2153 0x4d0a0, 0x4d0b0,
2154 0x4d200, 0x4d268,
2155 0x4d280, 0x4d284,
2156 0x4d2a0, 0x4d2b0,
2157 0x4e0c0, 0x4e0e4,
2158 0x4f000, 0x4f08c,
2159 0x4f200, 0x4f250,
2160 0x4f400, 0x4f420,
2161 0x4f600, 0x4f618,
2162 0x4f800, 0x4f814,
2163 0x50000, 0x500cc,
2164 0x50400, 0x50400,
2165 0x50800, 0x508cc,
2166 0x50c00, 0x50c00,
2167 0x51000, 0x5101c,
2168 0x51300, 0x51308,
2169 };
2170
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002171 int i;
2172 struct adapter *ap = netdev2adap(dev);
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002173 static const unsigned int *reg_ranges;
2174 int arr_size = 0, buf_size = 0;
2175
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302176 if (is_t4(ap->params.chip)) {
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002177 reg_ranges = &t4_reg_ranges[0];
2178 arr_size = ARRAY_SIZE(t4_reg_ranges);
2179 buf_size = T4_REGMAP_SIZE;
2180 } else {
2181 reg_ranges = &t5_reg_ranges[0];
2182 arr_size = ARRAY_SIZE(t5_reg_ranges);
2183 buf_size = T5_REGMAP_SIZE;
2184 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002185
2186 regs->version = mk_adap_vers(ap);
2187
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002188 memset(buf, 0, buf_size);
2189 for (i = 0; i < arr_size; i += 2)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002190 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2191}
2192
2193static int restart_autoneg(struct net_device *dev)
2194{
2195 struct port_info *p = netdev_priv(dev);
2196
2197 if (!netif_running(dev))
2198 return -EAGAIN;
2199 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2200 return -EINVAL;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002201 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002202 return 0;
2203}
2204
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002205static int identify_port(struct net_device *dev,
2206 enum ethtool_phys_id_state state)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002207{
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002208 unsigned int val;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002209 struct adapter *adap = netdev2adap(dev);
2210
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002211 if (state == ETHTOOL_ID_ACTIVE)
2212 val = 0xffff;
2213 else if (state == ETHTOOL_ID_INACTIVE)
2214 val = 0;
2215 else
2216 return -EINVAL;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002217
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002218 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002219}
2220
Hariprasad Shenai40e9de42014-12-12 12:07:57 +05302221static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002222{
2223 unsigned int v = 0;
2224
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002225 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2226 type == FW_PORT_TYPE_BT_XAUI) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002227 v |= SUPPORTED_TP;
2228 if (caps & FW_PORT_CAP_SPEED_100M)
2229 v |= SUPPORTED_100baseT_Full;
2230 if (caps & FW_PORT_CAP_SPEED_1G)
2231 v |= SUPPORTED_1000baseT_Full;
2232 if (caps & FW_PORT_CAP_SPEED_10G)
2233 v |= SUPPORTED_10000baseT_Full;
2234 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2235 v |= SUPPORTED_Backplane;
2236 if (caps & FW_PORT_CAP_SPEED_1G)
2237 v |= SUPPORTED_1000baseKX_Full;
2238 if (caps & FW_PORT_CAP_SPEED_10G)
2239 v |= SUPPORTED_10000baseKX4_Full;
2240 } else if (type == FW_PORT_TYPE_KR)
2241 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002242 else if (type == FW_PORT_TYPE_BP_AP)
Dimitris Michailidis7d5e77a2010-12-14 21:36:47 +00002243 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2244 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2245 else if (type == FW_PORT_TYPE_BP4_AP)
2246 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2247 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2248 SUPPORTED_10000baseKX4_Full;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002249 else if (type == FW_PORT_TYPE_FIBER_XFI ||
Hariprasad Shenai40e9de42014-12-12 12:07:57 +05302250 type == FW_PORT_TYPE_FIBER_XAUI ||
2251 type == FW_PORT_TYPE_SFP ||
2252 type == FW_PORT_TYPE_QSFP_10G ||
2253 type == FW_PORT_TYPE_QSA) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002254 v |= SUPPORTED_FIBRE;
Hariprasad Shenai4c2d5182014-11-28 18:35:14 +05302255 if (caps & FW_PORT_CAP_SPEED_1G)
2256 v |= SUPPORTED_1000baseT_Full;
2257 if (caps & FW_PORT_CAP_SPEED_10G)
2258 v |= SUPPORTED_10000baseT_Full;
Hariprasad Shenai40e9de42014-12-12 12:07:57 +05302259 } else if (type == FW_PORT_TYPE_BP40_BA ||
2260 type == FW_PORT_TYPE_QSFP) {
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302261 v |= SUPPORTED_40000baseSR4_Full;
Hariprasad Shenai40e9de42014-12-12 12:07:57 +05302262 v |= SUPPORTED_FIBRE;
2263 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002264
2265 if (caps & FW_PORT_CAP_ANEG)
2266 v |= SUPPORTED_Autoneg;
2267 return v;
2268}
2269
2270static unsigned int to_fw_linkcaps(unsigned int caps)
2271{
2272 unsigned int v = 0;
2273
2274 if (caps & ADVERTISED_100baseT_Full)
2275 v |= FW_PORT_CAP_SPEED_100M;
2276 if (caps & ADVERTISED_1000baseT_Full)
2277 v |= FW_PORT_CAP_SPEED_1G;
2278 if (caps & ADVERTISED_10000baseT_Full)
2279 v |= FW_PORT_CAP_SPEED_10G;
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302280 if (caps & ADVERTISED_40000baseSR4_Full)
2281 v |= FW_PORT_CAP_SPEED_40G;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002282 return v;
2283}
2284
2285static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2286{
2287 const struct port_info *p = netdev_priv(dev);
2288
2289 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002290 p->port_type == FW_PORT_TYPE_BT_XFI ||
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002291 p->port_type == FW_PORT_TYPE_BT_XAUI)
2292 cmd->port = PORT_TP;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002293 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2294 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002295 cmd->port = PORT_FIBRE;
Hariprasad Shenai3e00a502014-05-07 18:01:02 +05302296 else if (p->port_type == FW_PORT_TYPE_SFP ||
2297 p->port_type == FW_PORT_TYPE_QSFP_10G ||
Hariprasad Shenai40e9de42014-12-12 12:07:57 +05302298 p->port_type == FW_PORT_TYPE_QSA ||
Hariprasad Shenai3e00a502014-05-07 18:01:02 +05302299 p->port_type == FW_PORT_TYPE_QSFP) {
2300 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2301 p->mod_type == FW_PORT_MOD_TYPE_SR ||
2302 p->mod_type == FW_PORT_MOD_TYPE_ER ||
2303 p->mod_type == FW_PORT_MOD_TYPE_LRM)
2304 cmd->port = PORT_FIBRE;
2305 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2306 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002307 cmd->port = PORT_DA;
2308 else
Hariprasad Shenai3e00a502014-05-07 18:01:02 +05302309 cmd->port = PORT_OTHER;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002310 } else
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002311 cmd->port = PORT_OTHER;
2312
2313 if (p->mdio_addr >= 0) {
2314 cmd->phy_address = p->mdio_addr;
2315 cmd->transceiver = XCVR_EXTERNAL;
2316 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2317 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2318 } else {
2319 cmd->phy_address = 0; /* not really, but no better option */
2320 cmd->transceiver = XCVR_INTERNAL;
2321 cmd->mdio_support = 0;
2322 }
2323
2324 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2325 cmd->advertising = from_fw_linkcaps(p->port_type,
2326 p->link_cfg.advertising);
David Decotigny70739492011-04-27 18:32:40 +00002327 ethtool_cmd_speed_set(cmd,
2328 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002329 cmd->duplex = DUPLEX_FULL;
2330 cmd->autoneg = p->link_cfg.autoneg;
2331 cmd->maxtxpkt = 0;
2332 cmd->maxrxpkt = 0;
2333 return 0;
2334}
2335
2336static unsigned int speed_to_caps(int speed)
2337{
Ben Hutchingse8b39012014-02-23 00:03:24 +00002338 if (speed == 100)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002339 return FW_PORT_CAP_SPEED_100M;
Ben Hutchingse8b39012014-02-23 00:03:24 +00002340 if (speed == 1000)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002341 return FW_PORT_CAP_SPEED_1G;
Ben Hutchingse8b39012014-02-23 00:03:24 +00002342 if (speed == 10000)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002343 return FW_PORT_CAP_SPEED_10G;
Ben Hutchingse8b39012014-02-23 00:03:24 +00002344 if (speed == 40000)
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302345 return FW_PORT_CAP_SPEED_40G;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002346 return 0;
2347}
2348
2349static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2350{
2351 unsigned int cap;
2352 struct port_info *p = netdev_priv(dev);
2353 struct link_config *lc = &p->link_cfg;
David Decotigny25db0332011-04-27 18:32:39 +00002354 u32 speed = ethtool_cmd_speed(cmd);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002355
2356 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2357 return -EINVAL;
2358
2359 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2360 /*
2361 * PHY offers a single speed. See if that's what's
2362 * being requested.
2363 */
2364 if (cmd->autoneg == AUTONEG_DISABLE &&
David Decotigny25db0332011-04-27 18:32:39 +00002365 (lc->supported & speed_to_caps(speed)))
2366 return 0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002367 return -EINVAL;
2368 }
2369
2370 if (cmd->autoneg == AUTONEG_DISABLE) {
David Decotigny25db0332011-04-27 18:32:39 +00002371 cap = speed_to_caps(speed);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002372
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302373 if (!(lc->supported & cap) ||
Ben Hutchingse8b39012014-02-23 00:03:24 +00002374 (speed == 1000) ||
2375 (speed == 10000) ||
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302376 (speed == 40000))
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002377 return -EINVAL;
2378 lc->requested_speed = cap;
2379 lc->advertising = 0;
2380 } else {
2381 cap = to_fw_linkcaps(cmd->advertising);
2382 if (!(lc->supported & cap))
2383 return -EINVAL;
2384 lc->requested_speed = 0;
2385 lc->advertising = cap | FW_PORT_CAP_ANEG;
2386 }
2387 lc->autoneg = cmd->autoneg;
2388
2389 if (netif_running(dev))
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002390 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2391 lc);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002392 return 0;
2393}
2394
2395static void get_pauseparam(struct net_device *dev,
2396 struct ethtool_pauseparam *epause)
2397{
2398 struct port_info *p = netdev_priv(dev);
2399
2400 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2401 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2402 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2403}
2404
2405static int set_pauseparam(struct net_device *dev,
2406 struct ethtool_pauseparam *epause)
2407{
2408 struct port_info *p = netdev_priv(dev);
2409 struct link_config *lc = &p->link_cfg;
2410
2411 if (epause->autoneg == AUTONEG_DISABLE)
2412 lc->requested_fc = 0;
2413 else if (lc->supported & FW_PORT_CAP_ANEG)
2414 lc->requested_fc = PAUSE_AUTONEG;
2415 else
2416 return -EINVAL;
2417
2418 if (epause->rx_pause)
2419 lc->requested_fc |= PAUSE_RX;
2420 if (epause->tx_pause)
2421 lc->requested_fc |= PAUSE_TX;
2422 if (netif_running(dev))
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002423 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2424 lc);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002425 return 0;
2426}
2427
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002428static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2429{
2430 const struct port_info *pi = netdev_priv(dev);
2431 const struct sge *s = &pi->adapter->sge;
2432
2433 e->rx_max_pending = MAX_RX_BUFFERS;
2434 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2435 e->rx_jumbo_max_pending = 0;
2436 e->tx_max_pending = MAX_TXQ_ENTRIES;
2437
2438 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2439 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2440 e->rx_jumbo_pending = 0;
2441 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2442}
2443
2444static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2445{
2446 int i;
2447 const struct port_info *pi = netdev_priv(dev);
2448 struct adapter *adapter = pi->adapter;
2449 struct sge *s = &adapter->sge;
2450
2451 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2452 e->tx_pending > MAX_TXQ_ENTRIES ||
2453 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2454 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2455 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2456 return -EINVAL;
2457
2458 if (adapter->flags & FULL_INIT_DONE)
2459 return -EBUSY;
2460
2461 for (i = 0; i < pi->nqsets; ++i) {
2462 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2463 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2464 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2465 }
2466 return 0;
2467}
2468
2469static int closest_timer(const struct sge *s, int time)
2470{
2471 int i, delta, match = 0, min_delta = INT_MAX;
2472
2473 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2474 delta = time - s->timer_val[i];
2475 if (delta < 0)
2476 delta = -delta;
2477 if (delta < min_delta) {
2478 min_delta = delta;
2479 match = i;
2480 }
2481 }
2482 return match;
2483}
2484
2485static int closest_thres(const struct sge *s, int thres)
2486{
2487 int i, delta, match = 0, min_delta = INT_MAX;
2488
2489 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2490 delta = thres - s->counter_val[i];
2491 if (delta < 0)
2492 delta = -delta;
2493 if (delta < min_delta) {
2494 min_delta = delta;
2495 match = i;
2496 }
2497 }
2498 return match;
2499}
2500
2501/*
2502 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2503 */
Hariprasad Shenaidc9daab2015-01-27 13:47:45 +05302504unsigned int qtimer_val(const struct adapter *adap,
2505 const struct sge_rspq *q)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002506{
2507 unsigned int idx = q->intr_params >> 1;
2508
2509 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2510}
2511
2512/**
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302513 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002514 * @q: the Rx queue
2515 * @us: the hold-off time in us, or 0 to disable timer
2516 * @cnt: the hold-off packet count, or 0 to disable counter
2517 *
2518 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2519 * one of the two needs to be enabled for the queue to generate interrupts.
2520 */
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302521static int set_rspq_intr_params(struct sge_rspq *q,
2522 unsigned int us, unsigned int cnt)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002523{
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302524 struct adapter *adap = q->adap;
2525
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002526 if ((us | cnt) == 0)
2527 cnt = 1;
2528
2529 if (cnt) {
2530 int err;
2531 u32 v, new_idx;
2532
2533 new_idx = closest_thres(&adap->sge, cnt);
2534 if (q->desc && q->pktcnt_idx != new_idx) {
2535 /* the queue has already been created, update it */
Hariprasad Shenai51678652014-11-21 12:52:02 +05302536 v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
2537 FW_PARAMS_PARAM_X_V(
2538 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2539 FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002540 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2541 &new_idx);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002542 if (err)
2543 return err;
2544 }
2545 q->pktcnt_idx = new_idx;
2546 }
2547
2548 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2549 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2550 return 0;
2551}
2552
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302553/**
2554 * set_rx_intr_params - set a net devices's RX interrupt holdoff paramete!
2555 * @dev: the network device
2556 * @us: the hold-off time in us, or 0 to disable timer
2557 * @cnt: the hold-off packet count, or 0 to disable counter
2558 *
2559 * Set the RX interrupt hold-off parameters for a network device.
2560 */
2561static int set_rx_intr_params(struct net_device *dev,
2562 unsigned int us, unsigned int cnt)
2563{
2564 int i, err;
2565 struct port_info *pi = netdev_priv(dev);
2566 struct adapter *adap = pi->adapter;
2567 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2568
2569 for (i = 0; i < pi->nqsets; i++, q++) {
2570 err = set_rspq_intr_params(&q->rspq, us, cnt);
2571 if (err)
2572 return err;
2573 }
2574 return 0;
2575}
2576
Hariprasad Shenaie553ec32014-09-26 00:23:55 +05302577static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
2578{
2579 int i;
2580 struct port_info *pi = netdev_priv(dev);
2581 struct adapter *adap = pi->adapter;
2582 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2583
2584 for (i = 0; i < pi->nqsets; i++, q++)
2585 q->rspq.adaptive_rx = adaptive_rx;
2586
2587 return 0;
2588}
2589
2590static int get_adaptive_rx_setting(struct net_device *dev)
2591{
2592 struct port_info *pi = netdev_priv(dev);
2593 struct adapter *adap = pi->adapter;
2594 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2595
2596 return q->rspq.adaptive_rx;
2597}
2598
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002599static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2600{
Hariprasad Shenaie553ec32014-09-26 00:23:55 +05302601 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302602 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2603 c->rx_max_coalesced_frames);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002604}
2605
2606static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2607{
2608 const struct port_info *pi = netdev_priv(dev);
2609 const struct adapter *adap = pi->adapter;
2610 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2611
2612 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2613 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2614 adap->sge.counter_val[rq->pktcnt_idx] : 0;
Hariprasad Shenaie553ec32014-09-26 00:23:55 +05302615 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002616 return 0;
2617}
2618
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002619/**
2620 * eeprom_ptov - translate a physical EEPROM address to virtual
2621 * @phys_addr: the physical EEPROM address
2622 * @fn: the PCI function number
2623 * @sz: size of function-specific area
2624 *
2625 * Translate a physical EEPROM address to virtual. The first 1K is
2626 * accessed through virtual addresses starting at 31K, the rest is
2627 * accessed through virtual addresses starting at 0.
2628 *
2629 * The mapping is as follows:
2630 * [0..1K) -> [31K..32K)
2631 * [1K..1K+A) -> [31K-A..31K)
2632 * [1K+A..ES) -> [0..ES-A-1K)
2633 *
2634 * where A = @fn * @sz, and ES = EEPROM size.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002635 */
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002636static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002637{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002638 fn *= sz;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002639 if (phys_addr < 1024)
2640 return phys_addr + (31 << 10);
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002641 if (phys_addr < 1024 + fn)
2642 return 31744 - fn + phys_addr - 1024;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002643 if (phys_addr < EEPROMSIZE)
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002644 return phys_addr - 1024 - fn;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002645 return -EINVAL;
2646}
2647
2648/*
2649 * The next two routines implement eeprom read/write from physical addresses.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002650 */
2651static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2652{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002653 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002654
2655 if (vaddr >= 0)
2656 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2657 return vaddr < 0 ? vaddr : 0;
2658}
2659
2660static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2661{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002662 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002663
2664 if (vaddr >= 0)
2665 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2666 return vaddr < 0 ? vaddr : 0;
2667}
2668
2669#define EEPROM_MAGIC 0x38E2F10C
2670
2671static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2672 u8 *data)
2673{
2674 int i, err = 0;
2675 struct adapter *adapter = netdev2adap(dev);
2676
2677 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2678 if (!buf)
2679 return -ENOMEM;
2680
2681 e->magic = EEPROM_MAGIC;
2682 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2683 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2684
2685 if (!err)
2686 memcpy(data, buf + e->offset, e->len);
2687 kfree(buf);
2688 return err;
2689}
2690
/* ethtool set_eeprom handler.  Writes @eeprom->len bytes at @eeprom->offset,
 * widening the range to whole 32-bit words (with read-modify-write of any
 * partial first/last word) and temporarily lifting the EEPROM write
 * protection around the update.
 */
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	/* Caller must echo back the magic obtained from get_eeprom. */
	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* VPD accesses are word-sized: round the range out to 4-byte words. */
	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	/* Non-zero PCI functions may only write their own EEPROMPFSIZE slice,
	 * which starts after the first 1K shared area.
	 */
	if (adapter->fn > 0) {
		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* Pre-read the first word, and the last word if distinct. */
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;	/* already aligned; write caller's buffer directly */

	/* Disable write protection for the duration of the update. */
	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	/* Re-enable write protection only if the writes succeeded. */
	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
2747
/* ethtool flash_device handler.  ef->data names the firmware image to load
 * via the kernel firmware loader and flash onto the adapter.
 */
static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);
	/* Default sentinel (> any valid mailbox): do the whole upgrade from
	 * the host without firmware cooperation.
	 */
	unsigned int mbox = PCIE_FW_MASTER_M + 1;

	ef->data[sizeof(ef->data) - 1] = '\0';	/* force NUL-terminated name */
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	/* If the adapter has been fully initialized then we'll go ahead and
	 * try to get the firmware's cooperation in upgrading to the new
	 * firmware image otherwise we'll try to do the entire job from the
	 * host ... and we always "force" the operation in this path.
	 */
	if (adap->flags & FULL_INIT_DONE)
		mbox = adap->mbox;

	/* last argument: force the upgrade */
	ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev, "loaded firmware %s,"
			 " reload cxgb4 driver\n", ef->data);
	return ret;
}
2775
2776#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2777#define BCAST_CRC 0xa0ccc1a6
2778
2779static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2780{
2781 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2782 wol->wolopts = netdev2adap(dev)->wol;
2783 memset(&wol->sopass, 0, sizeof(wol->sopass));
2784}
2785
/* ethtool set_wol handler.  Programs magic-packet and broadcast wake-up
 * matching in the hardware for this port's channel.
 */
static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	/* Magic-packet wake: pass the MAC to match, or NULL to disable. */
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		/* Two pattern matchers are programmed for broadcast wake;
		 * BCAST_CRC is presumably the CRC of a broadcast destination
		 * address — NOTE(review): confirm against T4 WoL hardware docs.
		 */
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}
2805
/* ndo_set_features handler.  Only hardware VLAN CTAG RX stripping requires
 * device programming here; all other feature changes are accepted as-is.
 */
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		/* Hardware rejected the change: flip the VLAN RX bit back so
		 * dev->features keeps reflecting the actual device state.
		 */
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
2822
Ben Hutchings7850f632011-12-15 13:55:01 +00002823static u32 get_rss_table_size(struct net_device *dev)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002824{
2825 const struct port_info *pi = netdev_priv(dev);
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002826
Ben Hutchings7850f632011-12-15 13:55:01 +00002827 return pi->rss_size;
2828}
2829
Eyal Perry892311f2014-12-02 18:12:10 +02002830static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
Ben Hutchings7850f632011-12-15 13:55:01 +00002831{
2832 const struct port_info *pi = netdev_priv(dev);
2833 unsigned int n = pi->rss_size;
2834
Eyal Perry892311f2014-12-02 18:12:10 +02002835 if (hfunc)
2836 *hfunc = ETH_RSS_HASH_TOP;
2837 if (!p)
2838 return 0;
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002839 while (n--)
Ben Hutchings7850f632011-12-15 13:55:01 +00002840 p[n] = pi->rss[n];
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002841 return 0;
2842}
2843
Eyal Perry892311f2014-12-02 18:12:10 +02002844static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
2845 const u8 hfunc)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002846{
2847 unsigned int i;
2848 struct port_info *pi = netdev_priv(dev);
2849
Eyal Perry892311f2014-12-02 18:12:10 +02002850 /* We require at least one supported parameter to be changed and no
2851 * change in any of the unsupported parameters
2852 */
2853 if (key ||
2854 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
2855 return -EOPNOTSUPP;
2856 if (!p)
2857 return 0;
2858
Ben Hutchings7850f632011-12-15 13:55:01 +00002859 for (i = 0; i < pi->rss_size; i++)
2860 pi->rss[i] = p[i];
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002861 if (pi->adapter->flags & FULL_INIT_DONE)
2862 return write_rss(pi, pi->rss);
2863 return 0;
2864}
2865
/* ethtool get_rxnfc handler.  For ETHTOOL_GRXFH, reports which header
 * fields feed the RSS hash for each flow type based on the VI's firmware
 * RSS configuration flags; for ETHTOOL_GRXRINGS, reports the number of RX
 * rings.  All other sub-commands are unsupported.
 */
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		/* firmware RSS VI configuration flag word for this port */
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			/* 4-tuple hashing: addresses + TCP ports; otherwise
			 * fall back to 2-tuple (addresses only) if enabled.
			 */
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			/* UDP port hashing additionally requires the UDP
			 * enable flag.
			 */
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}
2928
/* ethtool method table for cxgb4 network devices */
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.set_eeprom        = set_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.set_phys_id       = identify_port,
	.nway_reset        = restart_autoneg,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_wol           = get_wol,
	.set_wol           = set_wol,
	.get_rxnfc         = get_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh	   = get_rss_table,
	.set_rxfh	   = set_rss_table,
	.flash_device      = set_flash,
};
2960
/* Populate the adapter's debugfs directory.  Returns -1 when the debugfs
 * root was never created (or creation failed), 0 otherwise.  The actual
 * entries are only added when debugfs support is compiled in.
 */
static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}
2971
2972/*
2973 * upper-layer driver support
2974 */
2975
2976/*
2977 * Allocate an active-open TID and set it to the supplied value.
2978 */
2979int cxgb4_alloc_atid(struct tid_info *t, void *data)
2980{
2981 int atid = -1;
2982
2983 spin_lock_bh(&t->atid_lock);
2984 if (t->afree) {
2985 union aopen_entry *p = t->afree;
2986
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002987 atid = (p - t->atid_tab) + t->atid_base;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002988 t->afree = p->next;
2989 p->data = data;
2990 t->atids_in_use++;
2991 }
2992 spin_unlock_bh(&t->atid_lock);
2993 return atid;
2994}
2995EXPORT_SYMBOL(cxgb4_alloc_atid);
2996
2997/*
2998 * Release an active-open TID.
2999 */
3000void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3001{
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003002 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003003
3004 spin_lock_bh(&t->atid_lock);
3005 p->next = t->afree;
3006 t->afree = p;
3007 t->atids_in_use--;
3008 spin_unlock_bh(&t->atid_lock);
3009}
3010EXPORT_SYMBOL(cxgb4_free_atid);
3011
/*
 * Allocate a server TID and set it to the supplied value.
 * Returns the allocated stid (rebased by stid_base), or -1 on failure.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		/* IPv4 servers need a single stid. */
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		/* IPv6 servers need an aligned block of 4 consecutive stids
		 * (an order-2 bitmap region).
		 */
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET)
			t->stids_in_use++;
		else
			t->stids_in_use += 4;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
3047
/* Allocate a server filter TID and set it to the supplied value.
 * Only IPv4 is supported.  Returns the allocated sftid (rebased by
 * sftid_base), or -1 on failure.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		/* Server filter stids occupy the bitmap region just past the
		 * ordinary server stids: [nstids, nstids + nsftids).
		 */
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		/* no IPv6 server filter support */
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		/* rebase the bitmap index into the sftid number space */
		stid -= t->nstids;
		stid += t->sftid_base;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);
3075
/* Release a server TID (ordinary or server-filter).
 * @stid is the externally visible TID, i.e. rebased by stid_base or
 * sftid_base at allocation time.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		/* map back into the bitmap region past the ordinary stids */
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		/* IPv6 servers hold an order-2 region of 4 stids */
		bitmap_release_region(t->stid_bmap, stid, 2);
	t->stid_tab[stid].data = NULL;
	/* mirror the accounting done in cxgb4_alloc_stid() */
	if (family == PF_INET)
		t->stids_in_use--;
	else
		t->stids_in_use -= 4;
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
3101
/*
 * Populate a TID_RELEASE WR. Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	/* Steer the CPL to the setup/control queue of the given Tx channel. */
	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
3115
/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	/* Chain the entry onto the pending-release list, using the tid_tab
	 * slot itself as the link field.
	 */
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		/* busy flag guarantees at most one queued work instance */
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003136
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		/* recover the Tx channel from the pointer's low 2 bits */
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		/* unlink the entry, then drop the lock so we may sleep */
		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		/* work-queue context: sleep until an skb can be allocated */
		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		/* the slot's index within tid_tab is the TID to release */
		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
3168
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	void *old;
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	old = t->tid_tab[tid];
	/* atomic context possible: no sleeping allocation here */
	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		t->tid_tab[tid] = NULL;
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		/* out of memory: defer to the release work queue, which
		 * clears the tid_tab slot itself
		 */
		cxgb4_queue_tid_release(t, chan, tid);
	/* only decrement if the TID was actually in use */
	if (old)
		atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
3191
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int stid_bmap_size;
	unsigned int natids = t->natids;
	struct adapter *adap = container_of(t, struct adapter, tids);

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	/* One contiguous allocation carved into: tid_tab, atid_tab, stid_tab
	 * (server + server-filter entries), the stid bitmap, and ftid_tab
	 * (filter + server-filter entries).  The carving order below must
	 * match this size computation.
	 */
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nftids * sizeof(*t->ftid_tab) +
	       t->nsftids * sizeof(*t->ftid_tab);

	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	/* carve the sub-tables out of the single allocation */
	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
	/* Reserve stid 0 for T4/T5 adapters */
	if (!t->stid_base &&
	    (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
		__set_bit(0, t->stid_bmap);

	return 0;
}
3241
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: VLAN information (not used by this routine)
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	/* wildcard the peer: accept from any remote address/port */
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	/* ask the host on each connection request and steer SYNs to @queue */
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
3283
/* cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	/* wildcard the peer: accept from any remote address/port */
	req->peer_port = htons(0);
	/* the 128-bit local address is split into two big-endian halves */
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	/* ask the host on each connection request and steer SYNs to @queue */
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
3326
/* cxgb4_remove_server - shut down a listening server
 * @dev: the device
 * @stid: the server TID to close
 * @queue: queue to which the hardware reply is steered
 * @ipv6: true for an IPv6 server, false for IPv4
 *
 * Sends a CPL_CLOSE_LISTSRV_REQ for the given server TID.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	/* request a reply, flag the address family, pick the reply queue */
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
3350
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003351/**
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003352 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3353 * @mtus: the HW MTU table
3354 * @mtu: the target MTU
3355 * @idx: index of selected entry in the MTU table
3356 *
3357 * Returns the index and the value in the HW MTU table that is closest to
3358 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3359 * table, in which case that smallest available value is selected.
3360 */
3361unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3362 unsigned int *idx)
3363{
3364 unsigned int i = 0;
3365
3366 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3367 ++i;
3368 if (idx)
3369 *idx = i;
3370 return mtus[i];
3371}
3372EXPORT_SYMBOL(cxgb4_best_mtu);
3373
3374/**
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05303375 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3376 * @mtus: the HW MTU table
3377 * @header_size: Header Size
3378 * @data_size_max: maximum Data Segment Size
3379 * @data_size_align: desired Data Segment Size Alignment (2^N)
3380 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3381 *
3382 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3383 * MTU Table based solely on a Maximum MTU parameter, we break that
3384 * parameter up into a Header Size and Maximum Data Segment Size, and
3385 * provide a desired Data Segment Size Alignment. If we find an MTU in
3386 * the Hardware MTU Table which will result in a Data Segment Size with
3387 * the requested alignment _and_ that MTU isn't "too far" from the
3388 * closest MTU, then we'll return that rather than the closest MTU.
3389 */
3390unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3391 unsigned short header_size,
3392 unsigned short data_size_max,
3393 unsigned short data_size_align,
3394 unsigned int *mtu_idxp)
3395{
3396 unsigned short max_mtu = header_size + data_size_max;
3397 unsigned short data_size_align_mask = data_size_align - 1;
3398 int mtu_idx, aligned_mtu_idx;
3399
3400 /* Scan the MTU Table till we find an MTU which is larger than our
3401 * Maximum MTU or we reach the end of the table. Along the way,
3402 * record the last MTU found, if any, which will result in a Data
3403 * Segment Length matching the requested alignment.
3404 */
3405 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3406 unsigned short data_size = mtus[mtu_idx] - header_size;
3407
3408 /* If this MTU minus the Header Size would result in a
3409 * Data Segment Size of the desired alignment, remember it.
3410 */
3411 if ((data_size & data_size_align_mask) == 0)
3412 aligned_mtu_idx = mtu_idx;
3413
3414 /* If we're not at the end of the Hardware MTU Table and the
3415 * next element is larger than our Maximum MTU, drop out of
3416 * the loop.
3417 */
3418 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3419 break;
3420 }
3421
3422 /* If we fell out of the loop because we ran to the end of the table,
3423 * then we just have to use the last [largest] entry.
3424 */
3425 if (mtu_idx == NMTUS)
3426 mtu_idx--;
3427
3428 /* If we found an MTU which resulted in the requested Data Segment
3429 * Length alignment and that's "not far" from the largest MTU which is
3430 * less than or equal to the maximum MTU, then use that.
3431 */
3432 if (aligned_mtu_idx >= 0 &&
3433 mtu_idx - aligned_mtu_idx <= 1)
3434 mtu_idx = aligned_mtu_idx;
3435
3436 /* If the caller has passed in an MTU Index pointer, pass the
3437 * MTU Index back. Return the MTU value.
3438 */
3439 if (mtu_idxp)
3440 *mtu_idxp = mtu_idx;
3441 return mtus[mtu_idx];
3442}
3443EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
3444
3445/**
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003446 * cxgb4_port_chan - get the HW channel of a port
3447 * @dev: the net device for the port
3448 *
3449 * Return the HW Tx channel of the given port.
3450 */
3451unsigned int cxgb4_port_chan(const struct net_device *dev)
3452{
3453 return netdev2pinfo(dev)->tx_chan;
3454}
3455EXPORT_SYMBOL(cxgb4_port_chan);
3456
Vipul Pandya881806b2012-05-18 15:29:24 +05303457unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3458{
3459 struct adapter *adap = netdev2adap(dev);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003460 u32 v1, v2, lp_count, hp_count;
Vipul Pandya881806b2012-05-18 15:29:24 +05303461
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303462 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
3463 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303464 if (is_t4(adap->params.chip)) {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303465 lp_count = LP_COUNT_G(v1);
3466 hp_count = HP_COUNT_G(v1);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003467 } else {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303468 lp_count = LP_COUNT_T5_G(v1);
3469 hp_count = HP_COUNT_T5_G(v2);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003470 }
3471 return lpfifo ? lp_count : hp_count;
Vipul Pandya881806b2012-05-18 15:29:24 +05303472}
3473EXPORT_SYMBOL(cxgb4_dbfifo_count);
3474
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003475/**
3476 * cxgb4_port_viid - get the VI id of a port
3477 * @dev: the net device for the port
3478 *
3479 * Return the VI id of the given port.
3480 */
3481unsigned int cxgb4_port_viid(const struct net_device *dev)
3482{
3483 return netdev2pinfo(dev)->viid;
3484}
3485EXPORT_SYMBOL(cxgb4_port_viid);
3486
3487/**
3488 * cxgb4_port_idx - get the index of a port
3489 * @dev: the net device for the port
3490 *
3491 * Return the index of the given port.
3492 */
3493unsigned int cxgb4_port_idx(const struct net_device *dev)
3494{
3495 return netdev2pinfo(dev)->port_id;
3496}
3497EXPORT_SYMBOL(cxgb4_port_idx);
3498
/* Read the adapter's aggregate TP TCP statistics (IPv4 into @v4, IPv6 into
 * @v6).  Serialized against other statistics readers via stats_lock.
 */
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3509
3510void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3511 const unsigned int *pgsz_order)
3512{
3513 struct adapter *adap = netdev2adap(dev);
3514
Hariprasad Shenai0d804332015-01-05 16:30:47 +05303515 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
3516 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
3517 HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
3518 HPZ3_V(pgsz_order[3]));
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003519}
3520EXPORT_SYMBOL(cxgb4_iscsi_init);
3521
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303522int cxgb4_flush_eq_cache(struct net_device *dev)
3523{
3524 struct adapter *adap = netdev2adap(dev);
3525 int ret;
3526
3527 ret = t4_fwaddrspace_write(adap, adap->mbox,
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303528 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303529 return ret;
3530}
3531EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3532
3533static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3534{
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303535 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303536 __be64 indices;
3537 int ret;
3538
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05303539 spin_lock(&adap->win0_lock);
3540 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3541 sizeof(indices), (__be32 *)&indices,
3542 T4_MEMORY_READ);
3543 spin_unlock(&adap->win0_lock);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303544 if (!ret) {
Vipul Pandya404d9e32012-10-08 02:59:43 +00003545 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3546 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303547 }
3548 return ret;
3549}
3550
/* Bring the hardware's producer index for egress queue @qid up to @pidx.
 * Used during doorbell-drop recovery: reads the queue context, computes how
 * many descriptors HW has missed (modulo the ring @size) and rings the
 * kernel doorbell for the difference.
 */
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		/* Ring distance from HW's pidx to the desired pidx,
		 * accounting for wrap-around.
		 */
		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		/* PIDX increment field moved between T4 and T5+. */
		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		/* Descriptor writes must be visible before the doorbell. */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3583
Vipul Pandya3cbdb922013-03-14 05:08:59 +00003584void cxgb4_disable_db_coalescing(struct net_device *dev)
3585{
3586 struct adapter *adap;
3587
3588 adap = netdev2adap(dev);
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303589 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303590 NOCOALESCE_F);
Vipul Pandya3cbdb922013-03-14 05:08:59 +00003591}
3592EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3593
3594void cxgb4_enable_db_coalescing(struct net_device *dev)
3595{
3596 struct adapter *adap;
3597
3598 adap = netdev2adap(dev);
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303599 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
Vipul Pandya3cbdb922013-03-14 05:08:59 +00003600}
3601EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3602
/* Read the 32-byte TPTE (translation/protection table entry) for @stag into
 * @tpte.  The STAG's upper 24 bits index 32-byte entries inside the stag
 * region of adapter memory; the flat offset is then mapped onto the
 * EDC0/EDC1/MC0/MC1 memory channels.  Returns 0 on success, -EINVAL when the
 * offset falls outside all populated memories, or an error from the read.
 */
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	struct adapter *adap;
	u32 offset, memtype, memaddr;
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	int ret;

	adap = netdev2adap(dev);

	/* 32 bytes per TPTE, relative to the start of the stag region. */
	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;	/* sizes are in MB */
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t4(adap->params.chip)) {
			/* T4 only has a single memory channel */
			goto err;
		} else {
			/* Only probe MC1 on T5+, and only when needed. */
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
3670
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +05303671u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3672{
3673 u32 hi, lo;
3674 struct adapter *adap;
3675
3676 adap = netdev2adap(dev);
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303677 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
3678 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +05303679
3680 return ((u64)hi << 32) | (u64)lo;
3681}
3682EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3683
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05303684int cxgb4_bar2_sge_qregs(struct net_device *dev,
3685 unsigned int qid,
3686 enum cxgb4_bar2_qtype qtype,
3687 u64 *pbar2_qoffset,
3688 unsigned int *pbar2_qid)
3689{
Stephen Rothwelldd0bcc02014-12-10 19:48:02 +11003690 return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05303691 qid,
3692 (qtype == CXGB4_BAR2_QTYPE_EGRESS
3693 ? T4_BAR2_QTYPE_EGRESS
3694 : T4_BAR2_QTYPE_INGRESS),
3695 pbar2_qoffset,
3696 pbar2_qid);
3697}
3698EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
3699
/* Forward declaration: the driver struct is defined later in the file but is
 * needed by the notifier callbacks below to identify our own net devices.
 */
static struct pci_driver cxgb4_driver;
3701
3702static void check_neigh_update(struct neighbour *neigh)
3703{
3704 const struct device *parent;
3705 const struct net_device *netdev = neigh->dev;
3706
3707 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3708 netdev = vlan_dev_real_dev(netdev);
3709 parent = netdev->dev.parent;
3710 if (parent && parent->driver == &cxgb4_driver.driver)
3711 t4_l2t_update(dev_get_drvdata(parent), neigh);
3712}
3713
3714static int netevent_cb(struct notifier_block *nb, unsigned long event,
3715 void *data)
3716{
3717 switch (event) {
3718 case NETEVENT_NEIGH_UPDATE:
3719 check_neigh_update(data);
3720 break;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003721 case NETEVENT_REDIRECT:
3722 default:
3723 break;
3724 }
3725 return 0;
3726}
3727
/* Whether cxgb4_netevent_nb is currently registered; the notifier is shared
 * by all adapters and registered on first ULD attach.
 */
static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
3732
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303733static void drain_db_fifo(struct adapter *adap, int usecs)
3734{
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003735 u32 v1, v2, lp_count, hp_count;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303736
3737 do {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303738 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
3739 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303740 if (is_t4(adap->params.chip)) {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303741 lp_count = LP_COUNT_G(v1);
3742 hp_count = HP_COUNT_G(v1);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003743 } else {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303744 lp_count = LP_COUNT_T5_G(v1);
3745 hp_count = HP_COUNT_T5_G(v2);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003746 }
3747
3748 if (lp_count == 0 && hp_count == 0)
3749 break;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303750 set_current_state(TASK_UNINTERRUPTIBLE);
3751 schedule_timeout(usecs_to_jiffies(usecs));
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303752 } while (1);
3753}
3754
/* Stop doorbell rings for Tx queue @q; further doorbells are deferred and
 * accumulated in q->db_pidx_inc until enable_txq_db().
 */
static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}
3763
/* Re-enable doorbells for Tx queue @q, first flushing any producer-index
 * increments that accumulated while doorbells were disabled.
 */
static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
3779
/* Disable doorbells on every Tx queue of the adapter: Ethernet, offload and
 * control queues alike.
 */
static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}
3791
/* Re-enable doorbells on every Tx queue of the adapter, flushing deferred
 * producer-index updates (see enable_txq_db()).
 */
static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
3803
3804static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3805{
3806 if (adap->uld_handle[CXGB4_ULD_RDMA])
3807 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3808 cmd);
3809}
3810
/* Work handler for a doorbell-FIFO-full event: wait for the FIFO to drain,
 * re-enable queue doorbells, tell the RDMA ULD the FIFO is empty again, and
 * re-arm the FIFO interrupts that t4_db_full() masked.
 */
static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
			 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
			 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
}
3824
/* Resynchronize the hardware producer index of Tx queue @q with the driver's
 * view after a doorbell drop, then clear the queue's disabled/deferred state.
 * Runs with the queue's db_lock held to keep the doorbell write atomic with
 * the state reset.
 */
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		/* Ring distance from HW's pidx to ours, modulo queue size. */
		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		/* PIDX increment field differs between T4 and T5+. */
		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		/* Descriptor writes must be visible before the doorbell. */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}
/* Run doorbell-drop recovery (sync_txq_pidx) on every Tx queue: Ethernet,
 * offload and control queues.
 */
static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
3869
/* Work handler for a dropped-doorbell event.  On T4 every queue must be
 * resynchronized from its context; on T5+ the hardware reports which queue
 * lost the doorbell and the missed PIDX increment can be replayed through
 * the queue's BAR2 doorbell.
 */
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		/* Drain, quiesce the RDMA ULD, resync every queue, then
		 * re-enable doorbells -- with drains in between each step.
		 */
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else {
		/* T5+: the dropped-doorbell register identifies the queue
		 * and the lost PIDX increment.
		 * NOTE(review): qid is masked with 0x1ffff (17 bits) but
		 * stored in a u16 -- confirm against the T5 register layout.
		 */
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					      &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
3907
3908void t4_db_full(struct adapter *adap)
3909{
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303910 if (is_t4(adap->params.chip)) {
Steve Wise05eb2382014-03-14 21:52:08 +05303911 disable_dbs(adap);
3912 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303913 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
3914 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
Anish Bhatt29aaee62014-08-20 13:44:06 -07003915 queue_work(adap->workq, &adap->db_full_task);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003916 }
Vipul Pandya881806b2012-05-18 15:29:24 +05303917}
3918
/* Interrupt-time handler for a dropped doorbell.  The recovery work is
 * queued for all chips; only T4 additionally needs doorbells disabled and
 * the RDMA ULD quiesced up front.
 */
void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}
3927
/* Attach one Upper-Layer Driver to @adap: fill a cxgb4_lld_info descriptor
 * with the adapter's resources and parameters, hand it to the ULD's add()
 * method, and record the returned handle.  Registers the shared netevent
 * notifier on first successful attach and, if the adapter is already up,
 * immediately notifies the ULD with CXGB4_STATE_UP.
 * Caller holds uld_mutex.
 */
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;
	unsigned short i;

	lli.pdev = adap->pdev;
	lli.pf = adap->fn;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	/* RDMA and iSCSI use different ingress queue sets. */
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.ciq_ids = adap->sge.rdma_ciq;
		lli.nrxq = adap->sge.rdmaqs;
		lli.nciq = adap->sge.rdmaciqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.ofld_rxq;
		lli.nrxq = adap->sge.ofldqsets;
	}
	lli.ntxq = adap->sge.ofldqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.chip;
	lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lli.udb_density = 1 << adap->params.sge.eq_qpp;
	lli.ucq_density = 1 << adap->params.sge.iq_qpp;
	lli.filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lli.tx_modq[i] = i;
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lli.fw_vers = adap->params.fw_vers;
	lli.dbfifo_int_thresh = dbfifo_int_thresh;
	lli.sge_ingpadboundary = adap->sge.fl_align;
	lli.sge_egrstatuspagesize = adap->sge.stat_len;
	lli.sge_pktshift = adap->sge.pktshift;
	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lli.max_ordird_qp = adap->params.max_ordird_qp;
	lli.max_ird_adapter = adap->params.max_ird_adapter;
	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}
3993
/* Make a newly probed adapter visible to all currently registered ULDs. */
static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	/* Publish the adapter on the RCU list (used by the IPv6 address
	 * notifier path) before attaching ULDs.
	 */
	spin_lock(&adap_rcu_lock);
	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
	spin_unlock(&adap_rcu_lock);

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}
4009
/* Detach every ULD from @adap ahead of adapter removal, unregistering the
 * shared netevent notifier when the last adapter goes away, and finally
 * remove the adapter from the RCU list.
 */
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);

	spin_lock(&adap_rcu_lock);
	list_del_rcu(&adap->rcu_node);
	spin_unlock(&adap_rcu_lock);
}
4032
4033static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4034{
4035 unsigned int i;
4036
4037 mutex_lock(&uld_mutex);
4038 for (i = 0; i < CXGB4_ULD_MAX; i++)
4039 if (adap->uld_handle[i])
4040 ulds[i].state_change(adap->uld_handle[i], new_state);
4041 mutex_unlock(&uld_mutex);
4042}
4043
4044/**
4045 * cxgb4_register_uld - register an upper-layer driver
4046 * @type: the ULD type
4047 * @p: the ULD methods
4048 *
4049 * Registers an upper-layer driver with this driver and notifies the ULD
4050 * about any presently available devices that support its type. Returns
4051 * %-EBUSY if a ULD of the same type is already registered.
4052 */
4053int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4054{
4055 int ret = 0;
4056 struct adapter *adap;
4057
4058 if (type >= CXGB4_ULD_MAX)
4059 return -EINVAL;
4060 mutex_lock(&uld_mutex);
4061 if (ulds[type].add) {
4062 ret = -EBUSY;
4063 goto out;
4064 }
4065 ulds[type] = *p;
4066 list_for_each_entry(adap, &adapter_list, list_node)
4067 uld_attach(adap, type);
4068out: mutex_unlock(&uld_mutex);
4069 return ret;
4070}
4071EXPORT_SYMBOL(cxgb4_register_uld);
4072
/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	/* Drop every adapter's handle for this ULD and clear its add()
	 * method so future adapters skip it.
	 */
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
4093
Anish Bhatt1bb60372014-10-14 20:07:22 -07004094#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 address notifier: keep the hardware CLIP (Compressed Local IP) table
 * in sync with IPv6 addresses configured on our interfaces, including
 * addresses on VLANs and (with bonding enabled) on bond masters that may
 * span our ports.
 */
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	/* Resolve a VLAN device to its underlying real device. */
	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		/* Bond master: mirror the address into every adapter, since
		 * any of them may carry the bond's traffic.
		 */
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	/* Only act on net devices that belong to this driver. */
	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}
4143
/* Whether cxgb4_inet6addr_notifier is currently registered with the IPv6
 * address notifier chain.
 */
static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};
4148
Vipul Pandya01bcca62013-07-04 16:10:46 +05304149static void update_clip(const struct adapter *adap)
4150{
4151 int i;
4152 struct net_device *dev;
4153 int ret;
4154
4155 rcu_read_lock();
4156
4157 for (i = 0; i < MAX_NPORTS; i++) {
4158 dev = adap->port[i];
4159 ret = 0;
4160
4161 if (dev)
Anish Bhattb5a02f52015-01-14 15:17:34 -08004162 ret = cxgb4_update_root_dev_clip(dev);
Vipul Pandya01bcca62013-07-04 16:10:46 +05304163
4164 if (ret < 0)
4165 break;
4166 }
4167 rcu_read_unlock();
4168}
Anish Bhatt1bb60372014-10-14 20:07:22 -07004169#endif /* IS_ENABLED(CONFIG_IPV6) */
Vipul Pandya01bcca62013-07-04 16:10:46 +05304170
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		/* Vector 0 handles non-data interrupts; per-queue vectors
		 * are requested separately.
		 */
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		/* Single shared interrupt for INTx/MSI. */
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}
4227
/* Reverse of cxgb_up(): disable interrupts, flush the deferred work items,
 * release IRQs, quiesce and stop the SGE, and free its queue resources.
 */
static void cxgb_down(struct adapter *adapter)
{
	t4_intr_disable(adapter);
	/* Make sure no recovery/release work runs after teardown starts. */
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	if (adapter->flags & USING_MSIX) {
		free_msix_queue_irqs(adapter);
		free_irq(adapter->msix_info[0].vec, adapter);
	} else
		free_irq(adapter->pdev->irq, adapter);
	quiesce_rx(adapter);
	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}
4247
4248/*
4249 * net_device operations
4250 */
4251static int cxgb_open(struct net_device *dev)
4252{
4253 int err;
4254 struct port_info *pi = netdev_priv(dev);
4255 struct adapter *adapter = pi->adapter;
4256
Dimitris Michailidis6a3c8692011-01-19 15:29:05 +00004257 netif_carrier_off(dev);
4258
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00004259 if (!(adapter->flags & FULL_INIT_DONE)) {
4260 err = cxgb_up(adapter);
4261 if (err < 0)
4262 return err;
4263 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004264
Dimitris Michailidisf68707b2010-06-18 10:05:32 +00004265 err = link_start(dev);
4266 if (!err)
4267 netif_tx_start_all_queues(dev);
4268 return err;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004269}
4270
4271static int cxgb_close(struct net_device *dev)
4272{
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004273 struct port_info *pi = netdev_priv(dev);
4274 struct adapter *adapter = pi->adapter;
4275
4276 netif_tx_stop_all_queues(dev);
4277 netif_carrier_off(dev);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004278 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004279}
4280
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00004281/* Return an error number if the indicated filter isn't writable ...
4282 */
4283static int writable_filter(struct filter_entry *f)
4284{
4285 if (f->locked)
4286 return -EPERM;
4287 if (f->pending)
4288 return -EBUSY;
4289
4290 return 0;
4291}
4292
4293/* Delete the filter at the specified index (if valid). The checks for all
4294 * the common problems with doing this like the filter being locked, currently
4295 * pending in another operation, etc.
4296 */
4297static int delete_filter(struct adapter *adapter, unsigned int fidx)
4298{
4299 struct filter_entry *f;
4300 int ret;
4301
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004302 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00004303 return -EINVAL;
4304
4305 f = &adapter->tids.ftid_tab[fidx];
4306 ret = writable_filter(f);
4307 if (ret)
4308 return ret;
4309 if (f->valid)
4310 return del_filter_wr(adapter, fidx);
4311
4312 return 0;
4313}
4314
/* Install a hardware server filter that steers matching TCP traffic to the
 * given ingress queue.  @stid is the caller's server TID; @sip/@sport select
 * the local IP/port to match (sip == 0 means "any local IP"); @port/@mask
 * optionally restrict the match to an ingress port.  @vlan is currently
 * unused here.  Returns 0 on success or a negative error.
 */
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
			       __be32 sip, __be16 sport, __be16 vlan,
			       unsigned int queue, unsigned char port, unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index: server filter entries live
	 * directly after the normal filter region of ftid_tab.
	 */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport = ~0;
	val = (u8 *)&sip;
	/* A non-zero sip means match that exact local IP; the ingress-port
	 * match is only programmed alongside an explicit IP match.
	 */
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	/* Match only TCP when the TP filter tuple includes the protocol. */
	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
4380
4381int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4382 unsigned int queue, bool ipv6)
4383{
4384 int ret;
4385 struct filter_entry *f;
4386 struct adapter *adap;
4387
4388 adap = netdev2adap(dev);
Vipul Pandya1cab7752012-12-10 09:30:55 +00004389
4390 /* Adjust stid to correct filter index */
Kumar Sanghvi470c60c2013-12-18 16:38:21 +05304391 stid -= adap->tids.sftid_base;
Vipul Pandya1cab7752012-12-10 09:30:55 +00004392 stid += adap->tids.nftids;
4393
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004394 f = &adap->tids.ftid_tab[stid];
4395 /* Unlock the filter */
4396 f->locked = 0;
4397
4398 ret = delete_filter(adap, stid);
4399 if (ret)
4400 return ret;
4401
4402 return 0;
4403}
4404EXPORT_SYMBOL(cxgb4_remove_server_filter);
4405
/* ndo_get_stats64 handler: translate the MAC's port statistics into the
 * generic rtnl_link_stats64 layout.  Always returns @ns.
 */
static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return ns;	/* leave caller-supplied stats untouched */
	}
	t4_get_port_stats(adapter, p->tx_chan, &stats);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	/* FIFO errors: per-channel overflow and truncation counters */
	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors (not tracked individually by the hardware) */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}
4455
/* ndo_do_ioctl handler: supports the MII ioctls (SIOCGMIIPHY and
 * clause-22/clause-45 MDIO register read/write via the firmware mailbox).
 */
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* Clause-45 PHY IDs encode both the port and device
		 * addresses; plain clause-22 IDs are just the port address
		 * (< 32) with a 5-bit register number.
		 */
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		/* MDIO access goes through our PF's firmware mailbox. */
		mbox = pi->adapter->fn;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
4494
/* ndo_set_rx_mode handler.  The hook returns void, so any error from the
 * firmware is necessarily swallowed; -1 means "leave the MTU unchanged".
 */
static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}
4500
4501static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4502{
4503 int ret;
4504 struct port_info *pi = netdev_priv(dev);
4505
4506 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4507 return -EINVAL;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004508 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4509 -1, -1, -1, true);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004510 if (!ret)
4511 dev->mtu = new_mtu;
4512 return ret;
4513}
4514
4515static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4516{
4517 int ret;
4518 struct sockaddr *addr = p;
4519 struct port_info *pi = netdev_priv(dev);
4520
4521 if (!is_valid_ether_addr(addr->sa_data))
Danny Kukawka504f9b52012-02-21 02:07:49 +00004522 return -EADDRNOTAVAIL;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004523
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004524 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4525 pi->xact_addr_filt, addr->sa_data, true, true);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004526 if (ret < 0)
4527 return ret;
4528
4529 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4530 pi->xact_addr_filt = ret;
4531 return 0;
4532}
4533
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		/* Each Rx queue has its own MSI-X vector: poll every
		 * queue set owned by this port.
		 */
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
		int i;

		for (i = 0; i < pi->nqsets; i++)
			t4_sge_intr_msix(0, &rx[i].rspq);
	} else {
		/* Single INTx/MSI handler covers the whole adapter. */
		t4_intr_handler(adap)(0, adap);
	}
}
#endif
4550
/* net_device callbacks for ports of this adapter. */
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
};
4567
/* Handle a fatal hardware error: clear the SGE global-enable bit to stop
 * DMA, mask all interrupt sources, and log the event.
 */
void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
4574
/* Return the specified PCI-E Configuration Space register from our Physical
 * Function. We try first via a Firmware LDST Command since we prefer to let
 * the firmware own all of these registers, but if that fails we go for it
 * directly ourselves.
 */
static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
{
	struct fw_ldst_cmd ldst_cmd;
	u32 val;
	int ret;

	/* Construct and send the Firmware LDST Command to retrieve the
	 * specified PCI-E Configuration Space register.
	 */
	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
	ldst_cmd.op_to_addrspace =
		htonl(FW_CMD_OP_V(FW_LDST_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F |
		      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
	ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
	ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
	ldst_cmd.u.pcie.ctrl_to_fn =
		(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
	ldst_cmd.u.pcie.r = reg;
	ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
			 &ldst_cmd);

	/* If the LDST Command succeeded, extract the returned register
	 * value. Otherwise read it directly ourself.
	 */
	if (ret == 0)
		val = ntohl(ldst_cmd.u.pcie.data[0]);
	else
		t4_hw_pci_read_cfg4(adap, reg, &val);

	return val;
}
4613
/* Program the three PCI-E memory-window decoders.  On T4 the windows must
 * be set up with absolute bus addresses (BAR0-relative); on T5 only the
 * offset within the BAR is programmed.
 */
static void setup_memwin(struct adapter *adap)
{
	u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;

	if (is_t4(adap->params.chip)) {
		u32 bar0;

		/* Truncation intentional: we only read the bottom 32-bits of
		 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
		 * mechanism to read BAR0 instead of using
		 * pci_resource_start() because we could be operating from
		 * within a Virtual Machine which is trapping our accesses to
		 * our Configuration Space and we need to set up the PCI-E
		 * Memory Window decoders with the actual addresses which will
		 * be coming across the PCI-E link.
		 */
		bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
		bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
		adap->t4_bar0 = bar0;

		mem_win0_base = bar0 + MEMWIN0_BASE;
		mem_win1_base = bar0 + MEMWIN1_BASE;
		mem_win2_base = bar0 + MEMWIN2_BASE;
		mem_win2_aperture = MEMWIN2_APERTURE;
	} else {
		/* For T5, only relative offset inside the PCIe BAR is passed */
		mem_win0_base = MEMWIN0_BASE;
		mem_win1_base = MEMWIN1_BASE;
		mem_win2_base = MEMWIN2_BASE_T5;
		mem_win2_aperture = MEMWIN2_APERTURE_T5;
	}
	/* WINDOW field encodes log2(aperture) - 10, i.e. aperture in KB. */
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
		     mem_win0_base | BIR_V(0) |
		     WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
		     mem_win1_base | BIR_V(0) |
		     WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
		     mem_win2_base | BIR_V(0) |
		     WINDOW_V(ilog2(mem_win2_aperture) - 10));
	/* Read back the last window register, presumably to flush the
	 * posted writes before the windows are used.
	 */
	t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
}
4656
/* Set up memory window 3 for RDMA on-chip queue (OCQ) access, if any OCQ
 * memory was provisioned by the firmware.
 */
static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		/* Window base is BAR2 plus the OCQ region's offset;
		 * aperture is the OCQ size rounded up to a power of two,
		 * expressed in KB for the WINDOW field.
		 */
		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		/* Read back, presumably to flush the posted writes. */
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}
4677
/* Phase 1 of initialization when we don't use a Firmware Configuration
 * File: negotiate device capabilities with the firmware, configure RSS and
 * PF/VF resources, initialize the SGE, and apply hard-coded register
 * tweaks.  Returns 0 on success or a negative error.
 */
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	/* select capabilities we'll be using */
	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
		/* NOTE(review): ret is >= 0 here, so this error path
		 * returns "success" to the caller — confirm whether a
		 * negative error code was intended.
		 */
		return ret;
	}
	/* write back the (possibly trimmed) capability selection */
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->fn,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->fn);
}
4760
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004761/*
4762 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4763 */
4764#define MAX_ATIDS 8192U
4765
4766/*
4767 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
Vipul Pandya636f9d32012-09-26 02:39:39 +00004768 *
4769 * If the firmware we're dealing with has Configuration File support, then
4770 * we use that to perform all configuration
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004771 */
Vipul Pandya636f9d32012-09-26 02:39:39 +00004772
4773/*
4774 * Tweak configuration based on module parameters, etc. Most of these have
4775 * defaults assigned to them by Firmware Configuration Files (if we're using
4776 * them) but need to be explicitly set if we're using hard-coded
4777 * initialization. But even in the case of using Firmware Configuration
4778 * Files, we'd like to expose the ability to change these via module
4779 * parameters so these are essentially common tweaks/settings for
4780 * Configuration Files and hard-coded initialization ...
4781 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc. The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 * Only 0 and 2 are legal values for the Rx DMA offset.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}
4813
4814/*
4815 * Attempt to initialize the adapter via a Firmware Configuration File.
4816 */
4817static int adap_init0_config(struct adapter *adapter, int reset)
4818{
4819 struct fw_caps_config_cmd caps_cmd;
4820 const struct firmware *cf;
4821 unsigned long mtype = 0, maddr = 0;
4822 u32 finiver, finicsum, cfcsum;
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304823 int ret;
4824 int config_issued = 0;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00004825 char *fw_config_file, fw_config_file_path[256];
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304826 char *config_name = NULL;
Vipul Pandya636f9d32012-09-26 02:39:39 +00004827
4828 /*
4829 * Reset device if necessary.
4830 */
4831 if (reset) {
4832 ret = t4_fw_reset(adapter, adapter->mbox,
Hariprasad Shenai0d804332015-01-05 16:30:47 +05304833 PIORSTMODE_F | PIORST_F);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004834 if (ret < 0)
4835 goto bye;
4836 }
4837
4838 /*
4839 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4840 * then use that. Otherwise, use the configuration file stored
4841 * in the adapter flash ...
4842 */
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05304843 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00004844 case CHELSIO_T4:
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304845 fw_config_file = FW4_CFNAME;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00004846 break;
4847 case CHELSIO_T5:
4848 fw_config_file = FW5_CFNAME;
4849 break;
4850 default:
4851 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4852 adapter->pdev->device);
4853 ret = -EINVAL;
4854 goto bye;
4855 }
4856
4857 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004858 if (ret < 0) {
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304859 config_name = "On FLASH";
Vipul Pandya636f9d32012-09-26 02:39:39 +00004860 mtype = FW_MEMTYPE_CF_FLASH;
4861 maddr = t4_flash_cfg_addr(adapter);
4862 } else {
4863 u32 params[7], val[7];
4864
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304865 sprintf(fw_config_file_path,
4866 "/lib/firmware/%s", fw_config_file);
4867 config_name = fw_config_file_path;
4868
Vipul Pandya636f9d32012-09-26 02:39:39 +00004869 if (cf->size >= FLASH_CFG_MAX_SIZE)
4870 ret = -ENOMEM;
4871 else {
Hariprasad Shenai51678652014-11-21 12:52:02 +05304872 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4873 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
Vipul Pandya636f9d32012-09-26 02:39:39 +00004874 ret = t4_query_params(adapter, adapter->mbox,
4875 adapter->fn, 0, 1, params, val);
4876 if (ret == 0) {
4877 /*
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05304878 * For t4_memory_rw() below addresses and
Vipul Pandya636f9d32012-09-26 02:39:39 +00004879 * sizes have to be in terms of multiples of 4
4880 * bytes. So, if the Configuration File isn't
4881 * a multiple of 4 bytes in length we'll have
4882 * to write that out separately since we can't
4883 * guarantee that the bytes following the
4884 * residual byte in the buffer returned by
4885 * request_firmware() are zeroed out ...
4886 */
4887 size_t resid = cf->size & 0x3;
4888 size_t size = cf->size & ~0x3;
4889 __be32 *data = (__be32 *)cf->data;
4890
Hariprasad Shenai51678652014-11-21 12:52:02 +05304891 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
4892 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
Vipul Pandya636f9d32012-09-26 02:39:39 +00004893
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05304894 spin_lock(&adapter->win0_lock);
4895 ret = t4_memory_rw(adapter, 0, mtype, maddr,
4896 size, data, T4_MEMORY_WRITE);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004897 if (ret == 0 && resid != 0) {
4898 union {
4899 __be32 word;
4900 char buf[4];
4901 } last;
4902 int i;
4903
4904 last.word = data[size >> 2];
4905 for (i = resid; i < 4; i++)
4906 last.buf[i] = 0;
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05304907 ret = t4_memory_rw(adapter, 0, mtype,
4908 maddr + size,
4909 4, &last.word,
4910 T4_MEMORY_WRITE);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004911 }
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05304912 spin_unlock(&adapter->win0_lock);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004913 }
4914 }
4915
4916 release_firmware(cf);
4917 if (ret)
4918 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004919 }
4920
Vipul Pandya636f9d32012-09-26 02:39:39 +00004921 /*
4922 * Issue a Capability Configuration command to the firmware to get it
4923 * to parse the Configuration File. We don't use t4_fw_config_file()
4924 * because we want the ability to modify various features after we've
4925 * processed the configuration file ...
4926 */
4927 memset(&caps_cmd, 0, sizeof(caps_cmd));
4928 caps_cmd.op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05304929 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4930 FW_CMD_REQUEST_F |
4931 FW_CMD_READ_F);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05304932 caps_cmd.cfvalid_to_len16 =
Hariprasad Shenai51678652014-11-21 12:52:02 +05304933 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
4934 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
4935 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
Vipul Pandya636f9d32012-09-26 02:39:39 +00004936 FW_LEN16(caps_cmd));
4937 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4938 &caps_cmd);
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304939
4940 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4941 * Configuration File in FLASH), our last gasp effort is to use the
4942 * Firmware Configuration File which is embedded in the firmware. A
4943 * very few early versions of the firmware didn't have one embedded
4944 * but we can ignore those.
4945 */
4946 if (ret == -ENOENT) {
4947 memset(&caps_cmd, 0, sizeof(caps_cmd));
4948 caps_cmd.op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05304949 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4950 FW_CMD_REQUEST_F |
4951 FW_CMD_READ_F);
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304952 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4953 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4954 sizeof(caps_cmd), &caps_cmd);
4955 config_name = "Firmware Default";
4956 }
4957
4958 config_issued = 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004959 if (ret < 0)
4960 goto bye;
4961
Vipul Pandya636f9d32012-09-26 02:39:39 +00004962 finiver = ntohl(caps_cmd.finiver);
4963 finicsum = ntohl(caps_cmd.finicsum);
4964 cfcsum = ntohl(caps_cmd.cfcsum);
4965 if (finicsum != cfcsum)
4966 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4967 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4968 finicsum, cfcsum);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004969
Vipul Pandya636f9d32012-09-26 02:39:39 +00004970 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00004971 * And now tell the firmware to use the configuration we just loaded.
4972 */
4973 caps_cmd.op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05304974 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4975 FW_CMD_REQUEST_F |
4976 FW_CMD_WRITE_F);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05304977 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
Vipul Pandya636f9d32012-09-26 02:39:39 +00004978 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4979 NULL);
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00004980 if (ret < 0)
4981 goto bye;
4982
Vipul Pandya636f9d32012-09-26 02:39:39 +00004983 /*
4984 * Tweak configuration based on system architecture, module
4985 * parameters, etc.
4986 */
4987 ret = adap_init0_tweaks(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004988 if (ret < 0)
4989 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004990
Vipul Pandya636f9d32012-09-26 02:39:39 +00004991 /*
4992 * And finally tell the firmware to initialize itself using the
4993 * parameters from the Configuration File.
4994 */
4995 ret = t4_fw_initialize(adapter, adapter->mbox);
4996 if (ret < 0)
4997 goto bye;
4998
Hariprasad Shenai06640312015-01-13 15:19:25 +05304999 /* Emit Firmware Configuration File information and return
5000 * successfully.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005001 */
Vipul Pandya636f9d32012-09-26 02:39:39 +00005002 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305003 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5004 config_name, finiver, cfcsum);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005005 return 0;
5006
5007 /*
5008 * Something bad happened. Return the error ... (If the "error"
5009 * is that there's no Configuration File on the adapter we don't
5010 * want to issue a warning since this is fairly common.)
5011 */
5012bye:
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305013 if (config_issued && ret != -ENOENT)
5014 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5015 config_name, -ret);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005016 return ret;
5017}
5018
/* Table of firmware images known to this driver, one entry per supported
 * ASIC generation (T4, T5).  Each entry names the Firmware Configuration
 * File (fs_name) and the firmware module image (fw_mod_name) to request
 * from userspace, plus the firmware header (firmware version and per-ULD
 * interface versions) that this driver was compiled against; the header is
 * compared with the flashed firmware to decide whether an upgrade is needed.
 */
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
5048
5049static struct fw_info *find_fw_info(int chip)
5050{
5051 int i;
5052
5053 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5054 if (fw_info_array[i].chip == chip)
5055 return &fw_info_array[i];
5056 }
5057 return NULL;
5058}
5059
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * Establishes a mailbox session with the firmware, optionally upgrades the
 * flashed firmware (Master PF only, uninitialized device only), retrieves
 * VPD and device-log parameters, drives adapter configuration via the
 * Firmware Configuration File, and finally pulls all the operating
 * parameters (TID/filter/RDMA/iSCSI resource ranges, MTU table) that the
 * rest of the driver depends on.  Returns 0 on success or a negative errno;
 * on failure (other than mailbox timeout/EIO) the firmware session is
 * released via t4_fw_bye().
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	struct fw_devlog_cmd devlog_cmd;
	u32 devlog_meminfo;
	int reset = 1;

	/* Contact FW, advertising Master capability */
	ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	/* t4_fw_hello() returns the mailbox of the Master PF; if that is
	 * ours, we won the Master election.
	 */
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ... (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	t4_get_fw_version(adap, &adap->params.fw_vers);
	t4_get_tp_version(adap, &adap->params.tp_vers);
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 *
		 * NOTE(review): card_fw is not checked for NULL before being
		 * handed to t4_prep_fw() — confirm t4_alloc_mem() cannot fail
		 * here or add a check.
		 */
		card_fw = t4_alloc_mem(sizeof(*card_fw));

		/* Get FW from /lib/firmware/ via the firmware loader; a
		 * failure is non-fatal: t4_prep_fw() is still called with
		 * fw_data == NULL so the flashed firmware can be evaluated.
		 */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		if (fw != NULL)
			release_firmware(fw);
		t4_free_mem(card_fw);

		if (ret < 0)
			goto bye;
	}

	/*
	 * Grab VPD parameters. This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware. On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/* Read firmware device log parameters. We really need to find a way
	 * to get these parameters initialized with some default values (which
	 * are likely to be correct) for the case where we either don't
	 * attach to the firmware or it's crashed when we probe the adapter.
	 * That way we'll still be able to perform early firmware startup
	 * debugging ... If the request to get the Firmware's Device Log
	 * parameters fails, we'll live so we don't make that a fatal error.
	 */
	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret == 0) {
		devlog_meminfo =
			ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
		adap->params.devlog.memtype =
			FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
		adap->params.devlog.start =
			FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
		adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
	}

	/*
	 * Find out what ports are available to us. Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/* If the firmware is initialized already, emit a simple note to that
	 * effect. Otherwise, it's time to try initializing the adapter.
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "\
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
			 "Initializing adapter\n");

		/* Find out whether we're dealing with a version of the
		 * firmware which has configuration file support.
		 */
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
				      params, val);

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		if (ret < 0) {
			dev_err(adap->pdev_dev, "firmware doesn't support "
				"Firmware Configuration Files\n");
			goto bye;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		ret = adap_init0_config(adap, reset);
		if (ret == -ENOENT) {
			dev_err(adap->pdev_dev, "no Configuration File "
				"present on adapter.\n");
			goto bye;
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev, "could not initialize "
				"adapter, error %d\n", -ret);
			goto bye;
		}
	}

	/* Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */
	ret = t4_sge_init(adap);
	if (ret < 0)
		goto bye;

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
	FW_PARAMS_PARAM_Y_V(0) | \
	FW_PARAMS_PARAM_Z_V(0)

	/* Egress/ingress queue starts, L2 table and filter TID ranges. */
	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	/* Compressed Local IP (CLIP) table range, used for IPv6 offload. */
	params[0] = FW_PARAM_PFVF(CLIP_START);
	params[1] = FW_PARAM_PFVF(CLIP_END);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->clipt_start = val[0];
	adap->clipt_end = val[1];

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connection through firmware work request
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages. Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability. Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Setup server filter region. Divide the available filter
		 * region into two parts. Regular filters get 1/3rd and server
		 * filters get 2/3rd part. This is only enabled if workaround
		 * path is enabled.
		 * 1. For regular filters.
		 * 2. Server filters: these are special filters which are used
		 * to redirect SYN packets to offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
					DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
					 DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
						adap->tids.ftid_base;
		}
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
	if (caps_cmd.rdmacaps) {
		/* RDMA resource ranges: STAGs, RQs and PBLs first ... */
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		/* ... then QP, CQ and on-chip queue ranges. */
		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		/* Older firmware doesn't expose the IRD/ORD limits; fall
		 * back to conservative defaults and clear the error.
		 */
		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
				      val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/* The MTU/MSS Table is initialized by now, so load their values. If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons. For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header. In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8. So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8. On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_sge_params(adap);
	t4_init_tp_params(adap);
	adap->flags |= FW_OK;
	return 0;

	/*
	 * Something bad happened. If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
5488
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005489/* EEH callbacks */
5490
/*
 * EEH/AER callback: a PCI error has been detected on this device.
 *
 * Marks the firmware connection down, notifies the upper-layer drivers
 * that recovery is starting, detaches all net devices (under stats_lock so
 * the stats code doesn't touch hardware while we tear down), brings the
 * adapter down if it was fully initialized, and disables the PCI device.
 * Returns DISCONNECT for permanent failures, otherwise NEED_RESET so the
 * PCI core proceeds to eeh_slot_reset().
 */
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	/* Only disable the device if we actually enabled it earlier;
	 * DEV_ENABLED tracks that so a repeated callback is harmless.
	 */
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
5521
/*
 * EEH/AER callback: the PCI slot has been reset.
 *
 * Re-enables the PCI device (if eeh_err_detected() disabled it), restores
 * PCI config state, re-establishes the firmware session as Master
 * (MASTER_MUST), re-runs basic adapter init, re-allocates a virtual
 * interface for every port and reloads the MTU table and memory windows.
 * Any failure returns PCI_ERS_RESULT_DISCONNECT; full success returns
 * PCI_ERS_RESULT_RECOVERED so eeh_resume() will re-attach the interfaces.
 */
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	/* No adapter bound: just restore PCI state and report success. */
	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					    "device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		/* t4_alloc_vi() returns the new VI id on success. */
		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
5574
5575static void eeh_resume(struct pci_dev *pdev)
5576{
5577 int i;
5578 struct adapter *adap = pci_get_drvdata(pdev);
5579
5580 if (!adap)
5581 return;
5582
5583 rtnl_lock();
5584 for_each_port(adap, i) {
5585 struct net_device *dev = adap->port[i];
5586
5587 if (netif_running(dev)) {
5588 link_start(dev);
5589 cxgb_set_rxmode(dev);
5590 }
5591 netif_device_attach(dev);
5592 }
5593 rtnl_unlock();
5594}
5595
/* PCI error-recovery (EEH/AER) handler table registered with the PCI core. */
static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset = eeh_slot_reset,
	.resume = eeh_resume,
};
5601
Kumar Sanghvi57d8b762014-02-18 17:56:10 +05305602static inline bool is_x_10g_port(const struct link_config *lc)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005603{
Kumar Sanghvi57d8b762014-02-18 17:56:10 +05305604 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
5605 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005606}
5607
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05305608static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
5609 unsigned int us, unsigned int cnt,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005610 unsigned int size, unsigned int iqe_size)
5611{
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05305612 q->adap = adap;
5613 set_rspq_intr_params(q, us, cnt);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005614 q->iqe_len = iqe_size;
5615 q->size = size;
5616}
5617
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs. Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif
	int ciq_size;

	/* Count the ports capable of 10G or better. */
	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
		s->rdmaciqs = adap->params.nports;
	}

	/* Default geometry: NIC Rx queues, 5us holdoff / 10-packet batch. */
	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	/* iSCSI offload Rx queues. */
	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	/* RDMA Rx queues. */
	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	/* RDMA concentrator IQs must be big enough for all CQs + filters,
	 * but are capped at the hardware maximum.
	 */
	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		CH_WARN(adap, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
		struct sge_ofld_rxq *r = &s->rdmaciq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
	}

	/* Firmware event queue and forwarded-interrupt queue. */
	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}
5739
5740/*
5741 * Reduce the number of Ethernet queues across all ports to at most n.
5742 * n provides at least one queue per port.
5743 */
Bill Pemberton91744942012-12-03 09:23:02 -05005744static void reduce_ethqs(struct adapter *adap, int n)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005745{
5746 int i;
5747 struct port_info *pi;
5748
5749 while (n < adap->sge.ethqsets)
5750 for_each_port(adap, i) {
5751 pi = adap2pinfo(adap, i);
5752 if (pi->nqsets > 1) {
5753 pi->nqsets--;
5754 adap->sge.ethqsets--;
5755 if (adap->sge.ethqsets <= n)
5756 break;
5757 }
5758 }
5759
5760 n = 0;
5761 for_each_port(adap, i) {
5762 pi = adap2pinfo(adap, i);
5763 pi->first_qset = n;
5764 n += pi->nqsets;
5765 }
5766}
5767
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

/*
 * Try to allocate MSI-X vectors for all queue groups.
 *
 * "want" is the ideal vector count (all Ethernet queue sets plus all
 * offload/RDMA queues plus EXTRA_VECS); "need" is the bare minimum we can
 * operate with.  pci_enable_msix_range() gives us anything in [need, want].
 * Whatever we actually get is then distributed: every group keeps its
 * minimum, and the NIC (Ethernet) queues get priority for leftovers.
 *
 * Returns 0 on success or a negative errno if MSI-X could not be enabled.
 */
static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 3 * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
	/* On success "want" becomes the number of vectors actually granted */
	want = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (want < 0)
		return want;

	/*
	 * Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = want - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		/* leftover vectors after the NIC queues took their share;
		 * keep a whole multiple of nchan so channels stay balanced
		 */
		i = want - EXTRA_VECS - s->max_ethqsets;
		i -= ofld_need - nchan;
		s->ofldqsets = (i / nchan) * nchan;	/* round down */
	}
	/* record the vector numbers the PCI layer assigned us */
	for (i = 0; i < want; ++i)
		adap->msix_info[i].vec = entries[i].vector;

	return 0;
}

#undef EXTRA_VECS
5823
Bill Pemberton91744942012-12-03 09:23:02 -05005824static int init_rss(struct adapter *adap)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00005825{
5826 unsigned int i, j;
5827
5828 for_each_port(adap, i) {
5829 struct port_info *pi = adap2pinfo(adap, i);
5830
5831 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5832 if (!pi->rss)
5833 return -ENOMEM;
5834 for (j = 0; j < pi->rss_size; j++)
Ben Hutchings278bc422011-12-15 13:56:49 +00005835 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
Dimitris Michailidis671b0062010-07-11 12:01:17 +00005836 }
5837 return 0;
5838}
5839
/*
 * Log a one-time summary for a port's net device: adapter name, chip
 * revision, supported link speeds, PCIe link width/speed, interrupt mode,
 * and the adapter serial/part numbers.
 */
static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	/* translate the PCIe link speed code into a printable string */
	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	/* build a "100/1000/10G/40G" style list of supported speeds */
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	/* back up over the trailing '/' so "BASE-..." overwrites it */
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, P/N: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.pn);
}
5876
/*
 * Set the Relaxed Ordering Enable bit in the device's PCIe Device Control
 * register so the device may issue relaxed-ordered transactions.
 */
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
5881
Dimitris Michailidis06546392010-07-11 12:01:16 +00005882/*
5883 * Free the following resources:
5884 * - memory used for tables
5885 * - MSI/MSI-X
5886 * - net devices
5887 * - resources FW is holding for us
5888 */
5889static void free_some_resources(struct adapter *adapter)
5890{
5891 unsigned int i;
5892
5893 t4_free_mem(adapter->l2t);
5894 t4_free_mem(adapter->tids.tid_tab);
5895 disable_msi(adapter);
5896
5897 for_each_port(adapter, i)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00005898 if (adapter->port[i]) {
5899 kfree(adap2pinfo(adapter, i)->rss);
Dimitris Michailidis06546392010-07-11 12:01:16 +00005900 free_netdev(adapter->port[i]);
Dimitris Michailidis671b0062010-07-11 12:01:17 +00005901 }
Dimitris Michailidis06546392010-07-11 12:01:16 +00005902 if (adapter->flags & FW_OK)
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00005903 t4_fw_bye(adapter, adapter->fn);
Dimitris Michailidis06546392010-07-11 12:01:16 +00005904}
5905
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

/*
 * PCI probe routine: bring up one adapter.
 *
 * Maps BAR0, verifies we are running on the PF this driver instance is
 * bound to (other PFs only get SR-IOV enabling via the "sriov" path),
 * configures DMA masks, allocates and initializes the adapter structure,
 * contacts the firmware, creates one net device per port, sets up
 * interrupts, RSS and debugfs, and finally instantiates virtual functions
 * if requested.
 *
 * Errors unwind through the goto labels at the bottom in strict reverse
 * order of acquisition.  Returns 0 on success or a negative errno.
 */
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	void __iomem *regs;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
	if (func != ent->driver_data) {
		/* Not the management PF: skip full init, but still allow
		 * SR-IOV VF instantiation below.
		 */
		iounmap(regs);
		pci_disable_device(pdev);
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	/* 0xff marks "no port" in the channel->port map */
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;


	if (!is_t4(adapter->params.chip)) {
		/* T5+: compute this PF's queues-per-page setting so we can
		 * sanity check it against the write-combining segment size
		 * before mapping BAR2.
		 */
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->fn);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled only
		 * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
		 * queue is less no of segments that can be accommodated in
		 * a page size.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
	/* RDMA memory window must be set up after adap_init0 */
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* allocate one net device per port */
	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;	/* no MAC filter claimed yet */
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		netdev->ethtool_ops = &cxgb_ethtool_ops;
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
					  adapter->clipt_end);
	if (!adapter->clipt) {
		/* We tolerate a lack of clip_table, giving up
		 * some functionality
		 */
		dev_warn(&pdev->dev,
			 "could not allocate Clip table, continuing\n");
		adapter->params.offload = 0;
	}
#endif
	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		/* not a single port registered: give up */
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		/* partial registration is tolerated */
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}
6199
/*
 * PCI remove routine (also used as the shutdown handler): tear down one
 * adapter in the reverse order of init_one().  If init_one() bailed out
 * before allocating the adapter (non-management PF), only the PCI regions
 * need releasing.
 */
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);

#endif

	if (adapter) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		/* DEV_ENABLED guards against double pci_disable_device(),
		 * e.g. after EEH recovery already disabled it
		 */
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		/* let outstanding RCU readers drain before freeing */
		synchronize_rcu();
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}
6258
/* PCI driver hooks; shutdown reuses the full remove path */
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};
6267
6268static int __init cxgb4_init_module(void)
6269{
6270 int ret;
6271
6272 /* Debugfs support is optional, just warn if this fails */
6273 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6274 if (!cxgb4_debugfs_root)
Joe Perches428ac432013-01-06 13:34:49 +00006275 pr_warn("could not create debugfs entry, continuing\n");
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006276
6277 ret = pci_register_driver(&cxgb4_driver);
Anish Bhatt29aaee62014-08-20 13:44:06 -07006278 if (ret < 0)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006279 debugfs_remove(cxgb4_debugfs_root);
Vipul Pandya01bcca62013-07-04 16:10:46 +05306280
Anish Bhatt1bb60372014-10-14 20:07:22 -07006281#if IS_ENABLED(CONFIG_IPV6)
Anish Bhattb5a02f52015-01-14 15:17:34 -08006282 if (!inet6addr_registered) {
6283 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6284 inet6addr_registered = true;
6285 }
Anish Bhatt1bb60372014-10-14 20:07:22 -07006286#endif
Vipul Pandya01bcca62013-07-04 16:10:46 +05306287
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006288 return ret;
6289}
6290
/*
 * Module exit: undo cxgb4_init_module() in reverse order — drop the
 * IPv6 address notifier (if it was ever registered), unregister the PCI
 * driver (which removes all adapters), then remove the debugfs root.
 */
static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}
6302
/* Module entry/exit points */
module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);