blob: c27dcd98ea33ae23b19062e11b5022b32083c487 [file] [log] [blame]
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
Anish Bhattce100b8b2014-06-19 21:37:15 -07004 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
Jiri Pirko01789342011-08-16 06:29:00 +000044#include <linux/if.h>
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +000045#include <linux/if_vlan.h>
46#include <linux/init.h>
47#include <linux/log2.h>
48#include <linux/mdio.h>
49#include <linux/module.h>
50#include <linux/moduleparam.h>
51#include <linux/mutex.h>
52#include <linux/netdevice.h>
53#include <linux/pci.h>
54#include <linux/aer.h>
55#include <linux/rtnetlink.h>
56#include <linux/sched.h>
57#include <linux/seq_file.h>
58#include <linux/sockios.h>
59#include <linux/vmalloc.h>
60#include <linux/workqueue.h>
61#include <net/neighbour.h>
62#include <net/netevent.h>
Vipul Pandya01bcca62013-07-04 16:10:46 +053063#include <net/addrconf.h>
David S. Miller1ef80192014-11-10 13:27:49 -050064#include <net/bonding.h>
Anish Bhattb5a02f52015-01-14 15:17:34 -080065#include <net/addrconf.h>
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +000066#include <asm/uaccess.h>
67
68#include "cxgb4.h"
69#include "t4_regs.h"
Hariprasad Shenaif612b812015-01-05 16:30:43 +053070#include "t4_values.h"
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +000071#include "t4_msg.h"
72#include "t4fw_api.h"
Anish Bhatt688848b2014-06-19 21:37:13 -070073#include "cxgb4_dcb.h"
Hariprasad Shenaifd88b312014-11-07 09:35:23 +053074#include "cxgb4_debugfs.h"
Anish Bhattb5a02f52015-01-14 15:17:34 -080075#include "clip_tbl.h"
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +000076#include "l2t.h"
77
Vipul Pandya01bcca62013-07-04 16:10:46 +053078#ifdef DRV_VERSION
79#undef DRV_VERSION
80#endif
Santosh Rastapur3a7f8552013-03-14 05:08:55 +000081#define DRV_VERSION "2.0.0-ko"
82#define DRV_DESC "Chelsio T4/T5 Network Driver"
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +000083
/* Upper/lower bounds on the sizes of the various Scatter-Gather Engine (SGE)
 * queues.  User-requested queue sizes are clamped into these ranges before
 * the rings are allocated.
 */
enum {
	MAX_TXQ_ENTRIES = 16384,	/* max Ethernet TX queue depth */
	MAX_CTRL_TXQ_ENTRIES = 1024,	/* max control queue depth */
	MAX_RSPQ_ENTRIES = 16384,	/* max response queue depth */
	MAX_RX_BUFFERS = 16384,		/* max free-list buffers */
	MIN_TXQ_ENTRIES = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES = 128,
	MIN_FL_ENTRIES = 16
};
94
/* Host shadow copy of ingress filter entry. This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware of the
 * firmware command. The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself. Most of this is a straight copy of information
	 * provided by the extended ioctl(). Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};
118
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000119#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
120 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
121 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
122
Hariprasad Shenai3fedeab2014-11-25 08:33:58 +0530123/* Macros needed to support the PCI Device ID Table ...
124 */
125#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
126 static struct pci_device_id cxgb4_pci_tbl[] = {
127#define CH_PCI_DEVICE_ID_FUNCTION 0x4
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000128
Hariprasad Shenai3fedeab2014-11-25 08:33:58 +0530129/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
130 * called for both.
131 */
132#define CH_PCI_DEVICE_ID_FUNCTION2 0x0
133
134#define CH_PCI_ID_TABLE_ENTRY(devid) \
135 {PCI_VDEVICE(CHELSIO, (devid)), 4}
136
137#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
138 { 0, } \
139 }
140
141#include "t4_pci_id_tbl.h"
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000142
/* Firmware and firmware-configuration file names for T4 and T5 adapters,
 * loaded via request_firmware() and advertised with MODULE_FIRMWARE so
 * userspace tooling can pre-stage them.
 */
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000155
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason. If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
		 " parameter");

/* Default message-enable bitmap for new net devices (see DFLT_MSG_ENABLE). */
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values. Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds, deprecated parameter");

/* Interrupt packet-count thresholds (see the hold-off timers above). */
static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters, "
		 "deprecated parameter");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries. This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary. And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA. However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

/* Whether to enable virtualization L2 ACL enforcement (SR-IOV only). */
static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
		 "deprecated parameter");

/* Configure the number of PCI-E Virtual Function which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

/* Global compressed-filter (TP) configuration; default from HW_TPL_*. */
static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
		 "deprecated parameter");
Vipul Pandyaf2b7e782012-12-10 09:30:52 +0000261
/* Root of this driver's debugfs hierarchy (created at module init). */
static struct dentry *cxgb4_debugfs_root;

/* All probed adapters; protected by uld_mutex together with ulds[]. */
static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
/* Registered Upper-Layer Drivers and their display names. */
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
271
272static void link_report(struct net_device *dev)
273{
274 if (!netif_carrier_ok(dev))
275 netdev_info(dev, "link down\n");
276 else {
277 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
278
279 const char *s = "10Mbps";
280 const struct port_info *p = netdev_priv(dev);
281
282 switch (p->link_cfg.speed) {
Ben Hutchingse8b39012014-02-23 00:03:24 +0000283 case 10000:
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000284 s = "10Gbps";
285 break;
Ben Hutchingse8b39012014-02-23 00:03:24 +0000286 case 1000:
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000287 s = "1000Mbps";
288 break;
Ben Hutchingse8b39012014-02-23 00:03:24 +0000289 case 100:
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000290 s = "100Mbps";
291 break;
Ben Hutchingse8b39012014-02-23 00:03:24 +0000292 case 40000:
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +0530293 s = "40Gbps";
294 break;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000295 }
296
297 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
298 fc[p->link_cfg.fc]);
299 }
300}
301
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	/* First TX queue of this port's queue set. */
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		/* Encode the firmware DMAQ parameter identifying this TX
		 * queue's DCB priority (keyed by the queue's context id).
		 */
		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		/* 0xffffffff tears the priority mapping down. */
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
					    &name, &value);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			/* Remember the priority we programmed. */
			txq->dcb_prio = value;
	}
}
#endif /* CONFIG_CHELSIO_T4_DCB */
340
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000341void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
342{
343 struct net_device *dev = adapter->port[port_id];
344
345 /* Skip changes from disabled ports. */
346 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
347 if (link_stat)
348 netif_carrier_on(dev);
Anish Bhatt688848b2014-06-19 21:37:13 -0700349 else {
350#ifdef CONFIG_CHELSIO_T4_DCB
351 cxgb4_dcb_state_init(dev);
352 dcb_tx_queue_prio_enable(dev, false);
353#endif /* CONFIG_CHELSIO_T4_DCB */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000354 netif_carrier_off(dev);
Anish Bhatt688848b2014-06-19 21:37:13 -0700355 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000356
357 link_report(dev);
358 }
359}
360
361void t4_os_portmod_changed(const struct adapter *adap, int port_id)
362{
363 static const char *mod_str[] = {
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +0000364 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000365 };
366
367 const struct net_device *dev = adap->port[port_id];
368 const struct port_info *pi = netdev_priv(dev);
369
370 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
371 netdev_info(dev, "port module unplugged\n");
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +0000372 else if (pi->mod_type < ARRAY_SIZE(mod_str))
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000373 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
374}
375
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;		/* accumulated multicast hash bits */
	u64 uhash = 0;		/* accumulated unicast hash bits */
	/* "free" is true only for the first t4_alloc_mac_filt() call;
	 * presumably it tells the firmware to discard previously allocated
	 * filters -- TODO confirm against t4_alloc_mac_filt().
	 */
	bool free = true;
	u16 filt_idx[7];	/* addresses are programmed in batches of 7 */
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;	/* mailbox = our PF */

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		/* Flush a batch when it's full or we've seen the last addr. */
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* Finally program the combined hash filter for addresses that didn't
	 * get exact-match entries.
	 */
	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}
425
/* Doorbell FIFO interrupt threshold.  Non-static: referenced from elsewhere
 * in the driver -- presumably the SGE code; TODO confirm.
 */
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
437
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000438/*
439 * Set Rx properties of a port, such as promiscruity, address filters, and MTU.
440 * If @mtu is -1 it is left unchanged.
441 */
442static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
443{
444 int ret;
445 struct port_info *pi = netdev_priv(dev);
446
447 ret = set_addr_filters(dev, sleep_ok);
448 if (ret == 0)
Dimitris Michailidis060e0c72010-08-02 13:19:21 +0000449 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000450 (dev->flags & IFF_PROMISC) ? 1 : 0,
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +0000451 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000452 sleep_ok);
453 return ret;
454}
455
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 *	Returns 0 on success or a negative firmware error code.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;	/* mailbox = our PF */

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		/* A non-negative return from t4_change_mac() is the index of
		 * the exact-match MAC filter that was allocated; remember it
		 * so later address changes can reuse the slot.
		 */
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		/* Disable bottom halves around the enable so any DCB
		 * notifications are not delivered mid-update.
		 * NOTE(review): exact reason for local_bh_disable() here is
		 * not visible in this file -- confirm before changing.
		 */
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}
495
/* Return non-zero if Data Center Bridging is enabled and negotiated for
 * @dev; always 0 when the driver is built without DCB support.
 */
int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);
	int state;

	if (!pi->dcb.enabled)
		return 0;

	state = pi->dcb.state;
	return state == CXGB4_DCB_STATE_FW_ALLSYNCED ||
	       state == CXGB4_DCB_STATE_HOST;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);
511
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[port];
	int was_enabled, now_enabled;

	/* Capture the DCB state before and after applying the update. */
	was_enabled = cxgb4_dcb_enabled(dev);
	cxgb4_dcb_handle_fw_update(adap, pcmd);
	now_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (now_enabled != was_enabled)
		dcb_tx_queue_prio_enable(dev, now_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
532
/* Clear a filter and release any of its resources that we own. This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule. The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}
553
554/* Handle a filter write/deletion reply.
555 */
556static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
557{
558 unsigned int idx = GET_TID(rpl);
559 unsigned int nidx = idx - adap->tids.ftid_base;
560 unsigned int ret;
561 struct filter_entry *f;
562
563 if (idx >= adap->tids.ftid_base && nidx <
564 (adap->tids.nftids + adap->tids.nsftids)) {
565 idx = nidx;
Hariprasad Shenaibdc590b2015-01-08 21:38:16 -0800566 ret = TCB_COOKIE_G(rpl->cookie);
Vipul Pandyaf2b7e782012-12-10 09:30:52 +0000567 f = &adap->tids.ftid_tab[idx];
568
569 if (ret == FW_FILTER_WR_FLT_DELETED) {
570 /* Clear the filter when we get confirmation from the
571 * hardware that the filter has been deleted.
572 */
573 clear_filter(adap, f);
574 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
575 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
576 idx);
577 clear_filter(adap, f);
578 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
579 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
580 f->pending = 0; /* asynchronous setup completed */
581 f->valid = 1;
582 } else {
583 /* Something went wrong. Issue a warning about the
584 * problem and clear everything out.
585 */
586 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
587 idx, ret);
588 clear_filter(adap, f);
589 }
590 }
591}
592
/**
 *	fwevtq_handler - response queue handler for the FW event queue
 *	@q: the response queue that received the message
 *	@rsp: the response queue descriptor holding the CPL message
 *	@gl: the gather list of packet fragments (unused here)
 *
 *	Dispatches CPL messages arriving on the firmware event queue:
 *	egress-queue updates, firmware replies (including DCB port commands
 *	when DCB is configured), L2T write replies and filter replies.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 * Unwrap the inner message and re-read the opcode.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		/* Restart the TX queue the firmware says has made progress.
		 * Queues below ofldtxq in the egr_map are Ethernet queues;
		 * the rest are offload queues driven by a tasklet.
		 */
		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		/* Intercept firmware PORT commands to drive the DCB state
		 * machine before (or instead of) generic FW reply handling.
		 */
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev = q->adap->port[port];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS_F)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
677
678/**
679 * uldrx_handler - response queue handler for ULD queues
680 * @q: the response queue that received the packet
681 * @rsp: the response queue descriptor holding the offload message
682 * @gl: the gather list of packet fragments
683 *
684 * Deliver an ingress offload packet to a ULD. All processing is done by
685 * the ULD, we just maintain statistics.
686 */
687static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
688 const struct pkt_gl *gl)
689{
690 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
691
Vipul Pandyab407a4a2013-04-29 04:04:40 +0000692 /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
693 */
694 if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
695 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
696 rsp += 2;
697
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000698 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
699 rxq->stats.nomem++;
700 return -1;
701 }
702 if (gl == NULL)
703 rxq->stats.imm++;
704 else if (gl == CXGB4_MSG_AN)
705 rxq->stats.an++;
706 else
707 rxq->stats.pkts++;
708 return 0;
709}
710
711static void disable_msi(struct adapter *adapter)
712{
713 if (adapter->flags & USING_MSIX) {
714 pci_disable_msix(adapter->pdev);
715 adapter->flags &= ~USING_MSIX;
716 } else if (adapter->flags & USING_MSI) {
717 pci_disable_msi(adapter->pdev);
718 adapter->flags &= ~USING_MSI;
719 }
720}
721
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	/* Read this PF's interrupt cause register. */
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		/* Software interrupt: note it and write the cause back,
		 * presumably to acknowledge/clear it -- see hardware docs.
		 */
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
737
/*
 * Name the MSI-X interrupts.  Vector 0 is the non-data interrupt, vector 1
 * the firmware event queue; data queues start at index 2 (hence msi_idx = 2).
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues: one vector per queue set of each port */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);

	for_each_rdmaciq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
			 adap->port[0]->name, i);
}
775
/*
 * Request an MSI-X vector for every data queue: the firmware event queue
 * (vector 1) followed by each Ethernet, offload, RDMA and RDMA-CIQ Rx
 * queue starting at vector 2 (vector 0 is the non-data interrupt and is
 * requested elsewhere).  On failure all vectors granted so far are freed
 * again in reverse order.  Returns 0 or the request_irq() error.
 */
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	/* Each loop index above is one past the last queue whose vector was
	 * successfully requested, so pre-decrement walks back exactly over
	 * the granted vectors, mirroring the request order in reverse.
	 */
	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
841
842static void free_msix_queue_irqs(struct adapter *adap)
843{
Vipul Pandya404d9e32012-10-08 02:59:43 +0000844 int i, msi_index = 2;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000845 struct sge *s = &adap->sge;
846
847 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
848 for_each_ethrxq(s, i)
Vipul Pandya404d9e32012-10-08 02:59:43 +0000849 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000850 for_each_ofldrxq(s, i)
Vipul Pandya404d9e32012-10-08 02:59:43 +0000851 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000852 for_each_rdmarxq(s, i)
Vipul Pandya404d9e32012-10-08 02:59:43 +0000853 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
Hariprasad Shenaicf38be62014-06-06 21:40:42 +0530854 for_each_rdmaciq(s, i)
855 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000856}
857
858/**
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000859 * write_rss - write the RSS table for a given port
860 * @pi: the port
861 * @queues: array of queue indices for RSS
862 *
863 * Sets up the portion of the HW RSS table for the port's VI to distribute
864 * packets to the Rx queues in @queues.
865 */
866static int write_rss(const struct port_info *pi, const u16 *queues)
867{
868 u16 *rss;
869 int i, err;
870 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
871
872 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
873 if (!rss)
874 return -ENOMEM;
875
876 /* map the queue indices to queue ids */
877 for (i = 0; i < pi->rss_size; i++, queues++)
878 rss[i] = q[*queues].rspq.abs_id;
879
Dimitris Michailidis060e0c72010-08-02 13:19:21 +0000880 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
881 pi->rss_size, rss, pi->rss_size);
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000882 kfree(rss);
883 return err;
884}
885
886/**
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000887 * setup_rss - configure RSS
888 * @adap: the adapter
889 *
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000890 * Sets up RSS for each port.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000891 */
892static int setup_rss(struct adapter *adap)
893{
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000894 int i, err;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000895
896 for_each_port(adap, i) {
897 const struct port_info *pi = adap2pinfo(adap, i);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000898
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000899 err = write_rss(pi, pi->rss);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000900 if (err)
901 return err;
902 }
903 return 0;
904}
905
906/*
Dimitris Michailidise46dab42010-08-23 17:20:58 +0000907 * Return the channel of the ingress queue with the given qid.
908 */
909static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
910{
911 qid -= p->ingr_start;
912 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
913}
914
915/*
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000916 * Wait until all NAPI handlers are descheduled.
917 */
918static void quiesce_rx(struct adapter *adap)
919{
920 int i;
921
922 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
923 struct sge_rspq *q = adap->sge.ingr_map[i];
924
925 if (q && q->handler)
926 napi_disable(&q->napi);
927 }
928}
929
930/*
931 * Enable NAPI scheduling and interrupt generation for all Rx queues.
932 */
933static void enable_rx(struct adapter *adap)
934{
935 int i;
936
937 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
938 struct sge_rspq *q = adap->sge.ingr_map[i];
939
940 if (!q)
941 continue;
942 if (q->handler)
943 napi_enable(&q->napi);
944 /* 0-increment GTS to start the timer and enable interrupts */
Hariprasad Shenaif612b812015-01-05 16:30:43 +0530945 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
946 SEINTARM_V(q->intr_params) |
947 INGRESSQID_V(q->cntxt_id));
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000948 }
949}
950
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 *
 *	Allocation order: FW event queue, per-port Ethernet Rx/Tx queue
 *	sets, offload Rx/Tx queues, RDMA Rx queues, RDMA CIQs, and finally
 *	one control Tx queue per port.  Any failure frees everything
 *	allocated so far via the shared "freeout" label.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		/* Without MSI-X all queues share one interrupt queue; a
		 * negative msi_idx encodes the forwarded-interrupt queue id.
		 */
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		/* offload queues are spread evenly over the ports */
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_rdmaciq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmaciq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_ciq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	/* Steer tracing and unmatched-RSS traffic to port 0's first queue;
	 * the register moved between T4 and T5.
	 */
	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
}
1077
1078/*
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001079 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1080 * The allocated memory is cleared.
1081 */
1082void *t4_alloc_mem(size_t size)
1083{
Joe Perches8be04b92013-06-19 12:15:53 -07001084 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001085
1086 if (!p)
Eric Dumazet89bf67f2010-11-22 00:15:06 +00001087 p = vzalloc(size);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001088 return p;
1089}
1090
/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
	/* kvfree() dispatches to vfree()/kfree() itself, replacing the
	 * open-coded is_vmalloc_addr() check.  Safe on NULL.
	 */
	kvfree(addr);
}
1101
/* Send a Work Request to write the filter at a specified index. We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 *
 * Returns 0 on success, -EAGAIN if no L2T entry is available, or -ENOMEM
 * if the L2T switching entry cannot be written.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL)
			return -EAGAIN;
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	/* __GFP_NOFAIL: the allocation never returns NULL, so no check */
	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(ftid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}
1219
1220/* Delete the filter at a specified index.
1221 */
1222static int del_filter_wr(struct adapter *adapter, int fidx)
1223{
1224 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1225 struct sk_buff *skb;
1226 struct fw_filter_wr *fwr;
1227 unsigned int len, ftid;
1228
1229 len = sizeof(*fwr);
1230 ftid = adapter->tids.ftid_base + fidx;
1231
1232 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1233 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1234 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1235
1236 /* Mark the filter as "pending" and ship off the Filter Work Request.
1237 * When we get the Work Request Reply we'll clear the pending status.
1238 */
1239 f->pending = 1;
1240 t4_mgmt_tx(adapter, skb);
1241 return 0;
1242}
1243
/* ndo_select_queue handler: pick the Tx queue for an outgoing skb.
 * With DCB negotiated the VLAN priority selects the queue; otherwise,
 * if the "select_queue" module parameter is set, the recorded Rx queue
 * (or current CPU) is reused, else the kernel's fallback hash decides.
 */
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If a Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev)) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		/* fold the queue/CPU number into the valid Tx queue range */
		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}
1285
/* Return the adapter's offload-capability parameter (non-zero when the
 * adapter is configured for protocol offload).
 */
static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}
1290
1291/*
1292 * Implementation of ethtool operations.
1293 */
1294
1295static u32 get_msglevel(struct net_device *dev)
1296{
1297 return netdev2adap(dev)->msg_enable;
1298}
1299
1300static void set_msglevel(struct net_device *dev, u32 val)
1301{
1302 netdev2adap(dev)->msg_enable = val;
1303}
1304
/* ethtool statistics names, reported via get_strings()/get_stats().
 * The order must match: first the fields of struct port_stats (MAC
 * counters), then struct queue_port_stats, then the two write-coalesce
 * counters appended by get_stats().
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK ",
	"TxFramesOK ",
	"TxBroadcastFrames ",
	"TxMulticastFrames ",
	"TxUnicastFrames ",
	"TxErrorFrames ",

	"TxFrames64 ",
	"TxFrames65To127 ",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",

	"TxFramesDropped ",
	"TxPauseFrames ",
	"TxPPP0Frames ",
	"TxPPP1Frames ",
	"TxPPP2Frames ",
	"TxPPP3Frames ",
	"TxPPP4Frames ",
	"TxPPP5Frames ",
	"TxPPP6Frames ",
	"TxPPP7Frames ",

	"RxOctetsOK ",
	"RxFramesOK ",
	"RxBroadcastFrames ",
	"RxMulticastFrames ",
	"RxUnicastFrames ",

	"RxFramesTooLong ",
	"RxJabberErrors ",
	"RxFCSErrors ",
	"RxLengthErrors ",
	"RxSymbolErrors ",
	"RxRuntFrames ",

	"RxFrames64 ",
	"RxFrames65To127 ",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",

	"RxPauseFrames ",
	"RxPPP0Frames ",
	"RxPPP1Frames ",
	"RxPPP2Frames ",
	"RxPPP3Frames ",
	"RxPPP4Frames ",
	"RxPPP5Frames ",
	"RxPPP6Frames ",
	"RxPPP7Frames ",

	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc ",
	"RxBG1FramesTrunc ",
	"RxBG2FramesTrunc ",
	"RxBG3FramesTrunc ",

	"TSO ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"VLANextractions ",
	"VLANinsertions ",
	"GROpackets ",
	"GROmerged ",
	"WriteCoalSuccess ",
	"WriteCoalFail ",
};
1382
1383static int get_sset_count(struct net_device *dev, int sset)
1384{
1385 switch (sset) {
1386 case ETH_SS_STATS:
1387 return ARRAY_SIZE(stats_strings);
1388 default:
1389 return -EOPNOTSUPP;
1390 }
1391}
1392
1393#define T4_REGMAP_SIZE (160 * 1024)
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001394#define T5_REGMAP_SIZE (332 * 1024)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001395
1396static int get_regs_len(struct net_device *dev)
1397{
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001398 struct adapter *adap = netdev2adap(dev);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301399 if (is_t4(adap->params.chip))
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001400 return T4_REGMAP_SIZE;
1401 else
1402 return T5_REGMAP_SIZE;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001403}
1404
/* ethtool: size of the EEPROM exposed through ethtool -e. */
static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
1409
/* ethtool: driver name/version, bus address and, when known, the
 * firmware and TP microcode versions decoded from params.fw_vers.
 */
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	/* fw_vers is 0 until the firmware version has been retrieved */
	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			"%u.%u.%u.%u, TP %u.%u.%u.%u",
			FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
			FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
			FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
			FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
			FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
			FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
			FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
			FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
}
1431
1432static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1433{
1434 if (stringset == ETH_SS_STATS)
1435 memcpy(data, stats_strings, sizeof(stats_strings));
1436}
1437
/*
 * port stats maintained per queue of the port. They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;        /* TSO requests */
	u64 tx_csum;    /* Tx checksum offloads */
	u64 rx_csum;    /* Rx good checksums */
	u64 vlan_ex;    /* VLAN tag extractions */
	u64 vlan_ins;   /* VLAN tag insertions */
	u64 gro_pkts;   /* GRO/LRO packets */
	u64 gro_merged; /* GRO/LRO merged segments */
};
1451
1452static void collect_sge_port_stats(const struct adapter *adap,
1453 const struct port_info *p, struct queue_port_stats *s)
1454{
1455 int i;
1456 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1457 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1458
1459 memset(s, 0, sizeof(*s));
1460 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1461 s->tso += tx->tso;
1462 s->tx_csum += tx->tx_cso;
1463 s->rx_csum += rx->stats.rx_cso;
1464 s->vlan_ex += rx->stats.vlan_ex;
1465 s->vlan_ins += tx->vlan_ins;
Dimitris Michailidis4a6346d2010-05-10 15:58:09 +00001466 s->gro_pkts += rx->stats.lro_pkts;
1467 s->gro_merged += rx->stats.lro_merged;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001468 }
1469}
1470
1471static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1472 u64 *data)
1473{
1474 struct port_info *pi = netdev_priv(dev);
1475 struct adapter *adapter = pi->adapter;
Santosh Rastapur22adfe02013-03-14 05:08:51 +00001476 u32 val1, val2;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001477
1478 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1479
1480 data += sizeof(struct port_stats) / sizeof(u64);
1481 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
Santosh Rastapur22adfe02013-03-14 05:08:51 +00001482 data += sizeof(struct queue_port_stats) / sizeof(u64);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301483 if (!is_t4(adapter->params.chip)) {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05301484 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
1485 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
1486 val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
Santosh Rastapur22adfe02013-03-14 05:08:51 +00001487 *data = val1 - val2;
1488 data++;
1489 *data = val2;
1490 data++;
1491 } else {
1492 memset(data, 0, 2 * sizeof(u64));
1493 *data += 2;
1494 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001495}
1496
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version (currently always 1)
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->params.chip) |
		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
1508
1509static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1510 unsigned int end)
1511{
1512 u32 *p = buf + start;
1513
1514 for ( ; start <= end; start += sizeof(u32))
1515 *p++ = t4_read_reg(ap, start);
1516}
1517
1518static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1519 void *buf)
1520{
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001521 static const unsigned int t4_reg_ranges[] = {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001522 0x1008, 0x1108,
1523 0x1180, 0x11b4,
1524 0x11fc, 0x123c,
1525 0x1300, 0x173c,
1526 0x1800, 0x18fc,
1527 0x3000, 0x30d8,
1528 0x30e0, 0x5924,
1529 0x5960, 0x59d4,
1530 0x5a00, 0x5af8,
1531 0x6000, 0x6098,
1532 0x6100, 0x6150,
1533 0x6200, 0x6208,
1534 0x6240, 0x6248,
1535 0x6280, 0x6338,
1536 0x6370, 0x638c,
1537 0x6400, 0x643c,
1538 0x6500, 0x6524,
1539 0x6a00, 0x6a38,
1540 0x6a60, 0x6a78,
1541 0x6b00, 0x6b84,
1542 0x6bf0, 0x6c84,
1543 0x6cf0, 0x6d84,
1544 0x6df0, 0x6e84,
1545 0x6ef0, 0x6f84,
1546 0x6ff0, 0x7084,
1547 0x70f0, 0x7184,
1548 0x71f0, 0x7284,
1549 0x72f0, 0x7384,
1550 0x73f0, 0x7450,
1551 0x7500, 0x7530,
1552 0x7600, 0x761c,
1553 0x7680, 0x76cc,
1554 0x7700, 0x7798,
1555 0x77c0, 0x77fc,
1556 0x7900, 0x79fc,
1557 0x7b00, 0x7c38,
1558 0x7d00, 0x7efc,
1559 0x8dc0, 0x8e1c,
1560 0x8e30, 0x8e78,
1561 0x8ea0, 0x8f6c,
1562 0x8fc0, 0x9074,
1563 0x90fc, 0x90fc,
1564 0x9400, 0x9458,
1565 0x9600, 0x96bc,
1566 0x9800, 0x9808,
1567 0x9820, 0x983c,
1568 0x9850, 0x9864,
1569 0x9c00, 0x9c6c,
1570 0x9c80, 0x9cec,
1571 0x9d00, 0x9d6c,
1572 0x9d80, 0x9dec,
1573 0x9e00, 0x9e6c,
1574 0x9e80, 0x9eec,
1575 0x9f00, 0x9f6c,
1576 0x9f80, 0x9fec,
1577 0xd004, 0xd03c,
1578 0xdfc0, 0xdfe0,
1579 0xe000, 0xea7c,
Hariprasad Shenai3d9103f2014-09-01 19:54:59 +05301580 0xf000, 0x11110,
1581 0x11118, 0x11190,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001582 0x19040, 0x1906c,
1583 0x19078, 0x19080,
1584 0x1908c, 0x19124,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001585 0x19150, 0x191b0,
1586 0x191d0, 0x191e8,
1587 0x19238, 0x1924c,
1588 0x193f8, 0x19474,
1589 0x19490, 0x194f8,
1590 0x19800, 0x19f30,
1591 0x1a000, 0x1a06c,
1592 0x1a0b0, 0x1a120,
1593 0x1a128, 0x1a138,
1594 0x1a190, 0x1a1c4,
1595 0x1a1fc, 0x1a1fc,
1596 0x1e040, 0x1e04c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001597 0x1e284, 0x1e28c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001598 0x1e2c0, 0x1e2c0,
1599 0x1e2e0, 0x1e2e0,
1600 0x1e300, 0x1e384,
1601 0x1e3c0, 0x1e3c8,
1602 0x1e440, 0x1e44c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001603 0x1e684, 0x1e68c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001604 0x1e6c0, 0x1e6c0,
1605 0x1e6e0, 0x1e6e0,
1606 0x1e700, 0x1e784,
1607 0x1e7c0, 0x1e7c8,
1608 0x1e840, 0x1e84c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001609 0x1ea84, 0x1ea8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001610 0x1eac0, 0x1eac0,
1611 0x1eae0, 0x1eae0,
1612 0x1eb00, 0x1eb84,
1613 0x1ebc0, 0x1ebc8,
1614 0x1ec40, 0x1ec4c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001615 0x1ee84, 0x1ee8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001616 0x1eec0, 0x1eec0,
1617 0x1eee0, 0x1eee0,
1618 0x1ef00, 0x1ef84,
1619 0x1efc0, 0x1efc8,
1620 0x1f040, 0x1f04c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001621 0x1f284, 0x1f28c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001622 0x1f2c0, 0x1f2c0,
1623 0x1f2e0, 0x1f2e0,
1624 0x1f300, 0x1f384,
1625 0x1f3c0, 0x1f3c8,
1626 0x1f440, 0x1f44c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001627 0x1f684, 0x1f68c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001628 0x1f6c0, 0x1f6c0,
1629 0x1f6e0, 0x1f6e0,
1630 0x1f700, 0x1f784,
1631 0x1f7c0, 0x1f7c8,
1632 0x1f840, 0x1f84c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001633 0x1fa84, 0x1fa8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001634 0x1fac0, 0x1fac0,
1635 0x1fae0, 0x1fae0,
1636 0x1fb00, 0x1fb84,
1637 0x1fbc0, 0x1fbc8,
1638 0x1fc40, 0x1fc4c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001639 0x1fe84, 0x1fe8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001640 0x1fec0, 0x1fec0,
1641 0x1fee0, 0x1fee0,
1642 0x1ff00, 0x1ff84,
1643 0x1ffc0, 0x1ffc8,
1644 0x20000, 0x2002c,
1645 0x20100, 0x2013c,
1646 0x20190, 0x201c8,
1647 0x20200, 0x20318,
1648 0x20400, 0x20528,
1649 0x20540, 0x20614,
1650 0x21000, 0x21040,
1651 0x2104c, 0x21060,
1652 0x210c0, 0x210ec,
1653 0x21200, 0x21268,
1654 0x21270, 0x21284,
1655 0x212fc, 0x21388,
1656 0x21400, 0x21404,
1657 0x21500, 0x21518,
1658 0x2152c, 0x2153c,
1659 0x21550, 0x21554,
1660 0x21600, 0x21600,
1661 0x21608, 0x21628,
1662 0x21630, 0x2163c,
1663 0x21700, 0x2171c,
1664 0x21780, 0x2178c,
1665 0x21800, 0x21c38,
1666 0x21c80, 0x21d7c,
1667 0x21e00, 0x21e04,
1668 0x22000, 0x2202c,
1669 0x22100, 0x2213c,
1670 0x22190, 0x221c8,
1671 0x22200, 0x22318,
1672 0x22400, 0x22528,
1673 0x22540, 0x22614,
1674 0x23000, 0x23040,
1675 0x2304c, 0x23060,
1676 0x230c0, 0x230ec,
1677 0x23200, 0x23268,
1678 0x23270, 0x23284,
1679 0x232fc, 0x23388,
1680 0x23400, 0x23404,
1681 0x23500, 0x23518,
1682 0x2352c, 0x2353c,
1683 0x23550, 0x23554,
1684 0x23600, 0x23600,
1685 0x23608, 0x23628,
1686 0x23630, 0x2363c,
1687 0x23700, 0x2371c,
1688 0x23780, 0x2378c,
1689 0x23800, 0x23c38,
1690 0x23c80, 0x23d7c,
1691 0x23e00, 0x23e04,
1692 0x24000, 0x2402c,
1693 0x24100, 0x2413c,
1694 0x24190, 0x241c8,
1695 0x24200, 0x24318,
1696 0x24400, 0x24528,
1697 0x24540, 0x24614,
1698 0x25000, 0x25040,
1699 0x2504c, 0x25060,
1700 0x250c0, 0x250ec,
1701 0x25200, 0x25268,
1702 0x25270, 0x25284,
1703 0x252fc, 0x25388,
1704 0x25400, 0x25404,
1705 0x25500, 0x25518,
1706 0x2552c, 0x2553c,
1707 0x25550, 0x25554,
1708 0x25600, 0x25600,
1709 0x25608, 0x25628,
1710 0x25630, 0x2563c,
1711 0x25700, 0x2571c,
1712 0x25780, 0x2578c,
1713 0x25800, 0x25c38,
1714 0x25c80, 0x25d7c,
1715 0x25e00, 0x25e04,
1716 0x26000, 0x2602c,
1717 0x26100, 0x2613c,
1718 0x26190, 0x261c8,
1719 0x26200, 0x26318,
1720 0x26400, 0x26528,
1721 0x26540, 0x26614,
1722 0x27000, 0x27040,
1723 0x2704c, 0x27060,
1724 0x270c0, 0x270ec,
1725 0x27200, 0x27268,
1726 0x27270, 0x27284,
1727 0x272fc, 0x27388,
1728 0x27400, 0x27404,
1729 0x27500, 0x27518,
1730 0x2752c, 0x2753c,
1731 0x27550, 0x27554,
1732 0x27600, 0x27600,
1733 0x27608, 0x27628,
1734 0x27630, 0x2763c,
1735 0x27700, 0x2771c,
1736 0x27780, 0x2778c,
1737 0x27800, 0x27c38,
1738 0x27c80, 0x27d7c,
1739 0x27e00, 0x27e04
1740 };
1741
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001742 static const unsigned int t5_reg_ranges[] = {
1743 0x1008, 0x1148,
1744 0x1180, 0x11b4,
1745 0x11fc, 0x123c,
1746 0x1280, 0x173c,
1747 0x1800, 0x18fc,
1748 0x3000, 0x3028,
1749 0x3060, 0x30d8,
1750 0x30e0, 0x30fc,
1751 0x3140, 0x357c,
1752 0x35a8, 0x35cc,
1753 0x35ec, 0x35ec,
1754 0x3600, 0x5624,
1755 0x56cc, 0x575c,
1756 0x580c, 0x5814,
1757 0x5890, 0x58bc,
1758 0x5940, 0x59dc,
1759 0x59fc, 0x5a18,
1760 0x5a60, 0x5a9c,
1761 0x5b9c, 0x5bfc,
1762 0x6000, 0x6040,
1763 0x6058, 0x614c,
1764 0x7700, 0x7798,
1765 0x77c0, 0x78fc,
1766 0x7b00, 0x7c54,
1767 0x7d00, 0x7efc,
1768 0x8dc0, 0x8de0,
1769 0x8df8, 0x8e84,
1770 0x8ea0, 0x8f84,
1771 0x8fc0, 0x90f8,
1772 0x9400, 0x9470,
1773 0x9600, 0x96f4,
1774 0x9800, 0x9808,
1775 0x9820, 0x983c,
1776 0x9850, 0x9864,
1777 0x9c00, 0x9c6c,
1778 0x9c80, 0x9cec,
1779 0x9d00, 0x9d6c,
1780 0x9d80, 0x9dec,
1781 0x9e00, 0x9e6c,
1782 0x9e80, 0x9eec,
1783 0x9f00, 0x9f6c,
1784 0x9f80, 0xa020,
1785 0xd004, 0xd03c,
1786 0xdfc0, 0xdfe0,
1787 0xe000, 0x11088,
Hariprasad Shenai3d9103f2014-09-01 19:54:59 +05301788 0x1109c, 0x11110,
1789 0x11118, 0x1117c,
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001790 0x11190, 0x11204,
1791 0x19040, 0x1906c,
1792 0x19078, 0x19080,
1793 0x1908c, 0x19124,
1794 0x19150, 0x191b0,
1795 0x191d0, 0x191e8,
1796 0x19238, 0x19290,
1797 0x193f8, 0x19474,
1798 0x19490, 0x194cc,
1799 0x194f0, 0x194f8,
1800 0x19c00, 0x19c60,
1801 0x19c94, 0x19e10,
1802 0x19e50, 0x19f34,
1803 0x19f40, 0x19f50,
1804 0x19f90, 0x19fe4,
1805 0x1a000, 0x1a06c,
1806 0x1a0b0, 0x1a120,
1807 0x1a128, 0x1a138,
1808 0x1a190, 0x1a1c4,
1809 0x1a1fc, 0x1a1fc,
1810 0x1e008, 0x1e00c,
1811 0x1e040, 0x1e04c,
1812 0x1e284, 0x1e290,
1813 0x1e2c0, 0x1e2c0,
1814 0x1e2e0, 0x1e2e0,
1815 0x1e300, 0x1e384,
1816 0x1e3c0, 0x1e3c8,
1817 0x1e408, 0x1e40c,
1818 0x1e440, 0x1e44c,
1819 0x1e684, 0x1e690,
1820 0x1e6c0, 0x1e6c0,
1821 0x1e6e0, 0x1e6e0,
1822 0x1e700, 0x1e784,
1823 0x1e7c0, 0x1e7c8,
1824 0x1e808, 0x1e80c,
1825 0x1e840, 0x1e84c,
1826 0x1ea84, 0x1ea90,
1827 0x1eac0, 0x1eac0,
1828 0x1eae0, 0x1eae0,
1829 0x1eb00, 0x1eb84,
1830 0x1ebc0, 0x1ebc8,
1831 0x1ec08, 0x1ec0c,
1832 0x1ec40, 0x1ec4c,
1833 0x1ee84, 0x1ee90,
1834 0x1eec0, 0x1eec0,
1835 0x1eee0, 0x1eee0,
1836 0x1ef00, 0x1ef84,
1837 0x1efc0, 0x1efc8,
1838 0x1f008, 0x1f00c,
1839 0x1f040, 0x1f04c,
1840 0x1f284, 0x1f290,
1841 0x1f2c0, 0x1f2c0,
1842 0x1f2e0, 0x1f2e0,
1843 0x1f300, 0x1f384,
1844 0x1f3c0, 0x1f3c8,
1845 0x1f408, 0x1f40c,
1846 0x1f440, 0x1f44c,
1847 0x1f684, 0x1f690,
1848 0x1f6c0, 0x1f6c0,
1849 0x1f6e0, 0x1f6e0,
1850 0x1f700, 0x1f784,
1851 0x1f7c0, 0x1f7c8,
1852 0x1f808, 0x1f80c,
1853 0x1f840, 0x1f84c,
1854 0x1fa84, 0x1fa90,
1855 0x1fac0, 0x1fac0,
1856 0x1fae0, 0x1fae0,
1857 0x1fb00, 0x1fb84,
1858 0x1fbc0, 0x1fbc8,
1859 0x1fc08, 0x1fc0c,
1860 0x1fc40, 0x1fc4c,
1861 0x1fe84, 0x1fe90,
1862 0x1fec0, 0x1fec0,
1863 0x1fee0, 0x1fee0,
1864 0x1ff00, 0x1ff84,
1865 0x1ffc0, 0x1ffc8,
1866 0x30000, 0x30030,
1867 0x30100, 0x30144,
1868 0x30190, 0x301d0,
1869 0x30200, 0x30318,
1870 0x30400, 0x3052c,
1871 0x30540, 0x3061c,
1872 0x30800, 0x30834,
1873 0x308c0, 0x30908,
1874 0x30910, 0x309ac,
1875 0x30a00, 0x30a04,
1876 0x30a0c, 0x30a2c,
1877 0x30a44, 0x30a50,
1878 0x30a74, 0x30c24,
1879 0x30d08, 0x30d14,
1880 0x30d1c, 0x30d20,
1881 0x30d3c, 0x30d50,
1882 0x31200, 0x3120c,
1883 0x31220, 0x31220,
1884 0x31240, 0x31240,
1885 0x31600, 0x31600,
1886 0x31608, 0x3160c,
1887 0x31a00, 0x31a1c,
1888 0x31e04, 0x31e20,
1889 0x31e38, 0x31e3c,
1890 0x31e80, 0x31e80,
1891 0x31e88, 0x31ea8,
1892 0x31eb0, 0x31eb4,
1893 0x31ec8, 0x31ed4,
1894 0x31fb8, 0x32004,
1895 0x32208, 0x3223c,
1896 0x32600, 0x32630,
1897 0x32a00, 0x32abc,
1898 0x32b00, 0x32b70,
1899 0x33000, 0x33048,
1900 0x33060, 0x3309c,
1901 0x330f0, 0x33148,
1902 0x33160, 0x3319c,
1903 0x331f0, 0x332e4,
1904 0x332f8, 0x333e4,
1905 0x333f8, 0x33448,
1906 0x33460, 0x3349c,
1907 0x334f0, 0x33548,
1908 0x33560, 0x3359c,
1909 0x335f0, 0x336e4,
1910 0x336f8, 0x337e4,
1911 0x337f8, 0x337fc,
1912 0x33814, 0x33814,
1913 0x3382c, 0x3382c,
1914 0x33880, 0x3388c,
1915 0x338e8, 0x338ec,
1916 0x33900, 0x33948,
1917 0x33960, 0x3399c,
1918 0x339f0, 0x33ae4,
1919 0x33af8, 0x33b10,
1920 0x33b28, 0x33b28,
1921 0x33b3c, 0x33b50,
1922 0x33bf0, 0x33c10,
1923 0x33c28, 0x33c28,
1924 0x33c3c, 0x33c50,
1925 0x33cf0, 0x33cfc,
1926 0x34000, 0x34030,
1927 0x34100, 0x34144,
1928 0x34190, 0x341d0,
1929 0x34200, 0x34318,
1930 0x34400, 0x3452c,
1931 0x34540, 0x3461c,
1932 0x34800, 0x34834,
1933 0x348c0, 0x34908,
1934 0x34910, 0x349ac,
1935 0x34a00, 0x34a04,
1936 0x34a0c, 0x34a2c,
1937 0x34a44, 0x34a50,
1938 0x34a74, 0x34c24,
1939 0x34d08, 0x34d14,
1940 0x34d1c, 0x34d20,
1941 0x34d3c, 0x34d50,
1942 0x35200, 0x3520c,
1943 0x35220, 0x35220,
1944 0x35240, 0x35240,
1945 0x35600, 0x35600,
1946 0x35608, 0x3560c,
1947 0x35a00, 0x35a1c,
1948 0x35e04, 0x35e20,
1949 0x35e38, 0x35e3c,
1950 0x35e80, 0x35e80,
1951 0x35e88, 0x35ea8,
1952 0x35eb0, 0x35eb4,
1953 0x35ec8, 0x35ed4,
1954 0x35fb8, 0x36004,
1955 0x36208, 0x3623c,
1956 0x36600, 0x36630,
1957 0x36a00, 0x36abc,
1958 0x36b00, 0x36b70,
1959 0x37000, 0x37048,
1960 0x37060, 0x3709c,
1961 0x370f0, 0x37148,
1962 0x37160, 0x3719c,
1963 0x371f0, 0x372e4,
1964 0x372f8, 0x373e4,
1965 0x373f8, 0x37448,
1966 0x37460, 0x3749c,
1967 0x374f0, 0x37548,
1968 0x37560, 0x3759c,
1969 0x375f0, 0x376e4,
1970 0x376f8, 0x377e4,
1971 0x377f8, 0x377fc,
1972 0x37814, 0x37814,
1973 0x3782c, 0x3782c,
1974 0x37880, 0x3788c,
1975 0x378e8, 0x378ec,
1976 0x37900, 0x37948,
1977 0x37960, 0x3799c,
1978 0x379f0, 0x37ae4,
1979 0x37af8, 0x37b10,
1980 0x37b28, 0x37b28,
1981 0x37b3c, 0x37b50,
1982 0x37bf0, 0x37c10,
1983 0x37c28, 0x37c28,
1984 0x37c3c, 0x37c50,
1985 0x37cf0, 0x37cfc,
1986 0x38000, 0x38030,
1987 0x38100, 0x38144,
1988 0x38190, 0x381d0,
1989 0x38200, 0x38318,
1990 0x38400, 0x3852c,
1991 0x38540, 0x3861c,
1992 0x38800, 0x38834,
1993 0x388c0, 0x38908,
1994 0x38910, 0x389ac,
1995 0x38a00, 0x38a04,
1996 0x38a0c, 0x38a2c,
1997 0x38a44, 0x38a50,
1998 0x38a74, 0x38c24,
1999 0x38d08, 0x38d14,
2000 0x38d1c, 0x38d20,
2001 0x38d3c, 0x38d50,
2002 0x39200, 0x3920c,
2003 0x39220, 0x39220,
2004 0x39240, 0x39240,
2005 0x39600, 0x39600,
2006 0x39608, 0x3960c,
2007 0x39a00, 0x39a1c,
2008 0x39e04, 0x39e20,
2009 0x39e38, 0x39e3c,
2010 0x39e80, 0x39e80,
2011 0x39e88, 0x39ea8,
2012 0x39eb0, 0x39eb4,
2013 0x39ec8, 0x39ed4,
2014 0x39fb8, 0x3a004,
2015 0x3a208, 0x3a23c,
2016 0x3a600, 0x3a630,
2017 0x3aa00, 0x3aabc,
2018 0x3ab00, 0x3ab70,
2019 0x3b000, 0x3b048,
2020 0x3b060, 0x3b09c,
2021 0x3b0f0, 0x3b148,
2022 0x3b160, 0x3b19c,
2023 0x3b1f0, 0x3b2e4,
2024 0x3b2f8, 0x3b3e4,
2025 0x3b3f8, 0x3b448,
2026 0x3b460, 0x3b49c,
2027 0x3b4f0, 0x3b548,
2028 0x3b560, 0x3b59c,
2029 0x3b5f0, 0x3b6e4,
2030 0x3b6f8, 0x3b7e4,
2031 0x3b7f8, 0x3b7fc,
2032 0x3b814, 0x3b814,
2033 0x3b82c, 0x3b82c,
2034 0x3b880, 0x3b88c,
2035 0x3b8e8, 0x3b8ec,
2036 0x3b900, 0x3b948,
2037 0x3b960, 0x3b99c,
2038 0x3b9f0, 0x3bae4,
2039 0x3baf8, 0x3bb10,
2040 0x3bb28, 0x3bb28,
2041 0x3bb3c, 0x3bb50,
2042 0x3bbf0, 0x3bc10,
2043 0x3bc28, 0x3bc28,
2044 0x3bc3c, 0x3bc50,
2045 0x3bcf0, 0x3bcfc,
2046 0x3c000, 0x3c030,
2047 0x3c100, 0x3c144,
2048 0x3c190, 0x3c1d0,
2049 0x3c200, 0x3c318,
2050 0x3c400, 0x3c52c,
2051 0x3c540, 0x3c61c,
2052 0x3c800, 0x3c834,
2053 0x3c8c0, 0x3c908,
2054 0x3c910, 0x3c9ac,
2055 0x3ca00, 0x3ca04,
2056 0x3ca0c, 0x3ca2c,
2057 0x3ca44, 0x3ca50,
2058 0x3ca74, 0x3cc24,
2059 0x3cd08, 0x3cd14,
2060 0x3cd1c, 0x3cd20,
2061 0x3cd3c, 0x3cd50,
2062 0x3d200, 0x3d20c,
2063 0x3d220, 0x3d220,
2064 0x3d240, 0x3d240,
2065 0x3d600, 0x3d600,
2066 0x3d608, 0x3d60c,
2067 0x3da00, 0x3da1c,
2068 0x3de04, 0x3de20,
2069 0x3de38, 0x3de3c,
2070 0x3de80, 0x3de80,
2071 0x3de88, 0x3dea8,
2072 0x3deb0, 0x3deb4,
2073 0x3dec8, 0x3ded4,
2074 0x3dfb8, 0x3e004,
2075 0x3e208, 0x3e23c,
2076 0x3e600, 0x3e630,
2077 0x3ea00, 0x3eabc,
2078 0x3eb00, 0x3eb70,
2079 0x3f000, 0x3f048,
2080 0x3f060, 0x3f09c,
2081 0x3f0f0, 0x3f148,
2082 0x3f160, 0x3f19c,
2083 0x3f1f0, 0x3f2e4,
2084 0x3f2f8, 0x3f3e4,
2085 0x3f3f8, 0x3f448,
2086 0x3f460, 0x3f49c,
2087 0x3f4f0, 0x3f548,
2088 0x3f560, 0x3f59c,
2089 0x3f5f0, 0x3f6e4,
2090 0x3f6f8, 0x3f7e4,
2091 0x3f7f8, 0x3f7fc,
2092 0x3f814, 0x3f814,
2093 0x3f82c, 0x3f82c,
2094 0x3f880, 0x3f88c,
2095 0x3f8e8, 0x3f8ec,
2096 0x3f900, 0x3f948,
2097 0x3f960, 0x3f99c,
2098 0x3f9f0, 0x3fae4,
2099 0x3faf8, 0x3fb10,
2100 0x3fb28, 0x3fb28,
2101 0x3fb3c, 0x3fb50,
2102 0x3fbf0, 0x3fc10,
2103 0x3fc28, 0x3fc28,
2104 0x3fc3c, 0x3fc50,
2105 0x3fcf0, 0x3fcfc,
2106 0x40000, 0x4000c,
2107 0x40040, 0x40068,
2108 0x40080, 0x40144,
2109 0x40180, 0x4018c,
2110 0x40200, 0x40298,
2111 0x402ac, 0x4033c,
2112 0x403f8, 0x403fc,
Kumar Sanghvic1f49e32014-02-18 17:56:13 +05302113 0x41304, 0x413c4,
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002114 0x41400, 0x4141c,
2115 0x41480, 0x414d0,
2116 0x44000, 0x44078,
2117 0x440c0, 0x44278,
2118 0x442c0, 0x44478,
2119 0x444c0, 0x44678,
2120 0x446c0, 0x44878,
2121 0x448c0, 0x449fc,
2122 0x45000, 0x45068,
2123 0x45080, 0x45084,
2124 0x450a0, 0x450b0,
2125 0x45200, 0x45268,
2126 0x45280, 0x45284,
2127 0x452a0, 0x452b0,
2128 0x460c0, 0x460e4,
2129 0x47000, 0x4708c,
2130 0x47200, 0x47250,
2131 0x47400, 0x47420,
2132 0x47600, 0x47618,
2133 0x47800, 0x47814,
2134 0x48000, 0x4800c,
2135 0x48040, 0x48068,
2136 0x48080, 0x48144,
2137 0x48180, 0x4818c,
2138 0x48200, 0x48298,
2139 0x482ac, 0x4833c,
2140 0x483f8, 0x483fc,
Kumar Sanghvic1f49e32014-02-18 17:56:13 +05302141 0x49304, 0x493c4,
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002142 0x49400, 0x4941c,
2143 0x49480, 0x494d0,
2144 0x4c000, 0x4c078,
2145 0x4c0c0, 0x4c278,
2146 0x4c2c0, 0x4c478,
2147 0x4c4c0, 0x4c678,
2148 0x4c6c0, 0x4c878,
2149 0x4c8c0, 0x4c9fc,
2150 0x4d000, 0x4d068,
2151 0x4d080, 0x4d084,
2152 0x4d0a0, 0x4d0b0,
2153 0x4d200, 0x4d268,
2154 0x4d280, 0x4d284,
2155 0x4d2a0, 0x4d2b0,
2156 0x4e0c0, 0x4e0e4,
2157 0x4f000, 0x4f08c,
2158 0x4f200, 0x4f250,
2159 0x4f400, 0x4f420,
2160 0x4f600, 0x4f618,
2161 0x4f800, 0x4f814,
2162 0x50000, 0x500cc,
2163 0x50400, 0x50400,
2164 0x50800, 0x508cc,
2165 0x50c00, 0x50c00,
2166 0x51000, 0x5101c,
2167 0x51300, 0x51308,
2168 };
2169
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002170 int i;
2171 struct adapter *ap = netdev2adap(dev);
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002172 static const unsigned int *reg_ranges;
2173 int arr_size = 0, buf_size = 0;
2174
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302175 if (is_t4(ap->params.chip)) {
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002176 reg_ranges = &t4_reg_ranges[0];
2177 arr_size = ARRAY_SIZE(t4_reg_ranges);
2178 buf_size = T4_REGMAP_SIZE;
2179 } else {
2180 reg_ranges = &t5_reg_ranges[0];
2181 arr_size = ARRAY_SIZE(t5_reg_ranges);
2182 buf_size = T5_REGMAP_SIZE;
2183 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002184
2185 regs->version = mk_adap_vers(ap);
2186
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002187 memset(buf, 0, buf_size);
2188 for (i = 0; i < arr_size; i += 2)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002189 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2190}
2191
2192static int restart_autoneg(struct net_device *dev)
2193{
2194 struct port_info *p = netdev_priv(dev);
2195
2196 if (!netif_running(dev))
2197 return -EAGAIN;
2198 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2199 return -EINVAL;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002200 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002201 return 0;
2202}
2203
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002204static int identify_port(struct net_device *dev,
2205 enum ethtool_phys_id_state state)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002206{
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002207 unsigned int val;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002208 struct adapter *adap = netdev2adap(dev);
2209
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002210 if (state == ETHTOOL_ID_ACTIVE)
2211 val = 0xffff;
2212 else if (state == ETHTOOL_ID_INACTIVE)
2213 val = 0;
2214 else
2215 return -EINVAL;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002216
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002217 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002218}
2219
Hariprasad Shenai40e9de42014-12-12 12:07:57 +05302220static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002221{
2222 unsigned int v = 0;
2223
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002224 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2225 type == FW_PORT_TYPE_BT_XAUI) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002226 v |= SUPPORTED_TP;
2227 if (caps & FW_PORT_CAP_SPEED_100M)
2228 v |= SUPPORTED_100baseT_Full;
2229 if (caps & FW_PORT_CAP_SPEED_1G)
2230 v |= SUPPORTED_1000baseT_Full;
2231 if (caps & FW_PORT_CAP_SPEED_10G)
2232 v |= SUPPORTED_10000baseT_Full;
2233 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2234 v |= SUPPORTED_Backplane;
2235 if (caps & FW_PORT_CAP_SPEED_1G)
2236 v |= SUPPORTED_1000baseKX_Full;
2237 if (caps & FW_PORT_CAP_SPEED_10G)
2238 v |= SUPPORTED_10000baseKX4_Full;
2239 } else if (type == FW_PORT_TYPE_KR)
2240 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002241 else if (type == FW_PORT_TYPE_BP_AP)
Dimitris Michailidis7d5e77a2010-12-14 21:36:47 +00002242 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2243 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2244 else if (type == FW_PORT_TYPE_BP4_AP)
2245 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2246 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2247 SUPPORTED_10000baseKX4_Full;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002248 else if (type == FW_PORT_TYPE_FIBER_XFI ||
Hariprasad Shenai40e9de42014-12-12 12:07:57 +05302249 type == FW_PORT_TYPE_FIBER_XAUI ||
2250 type == FW_PORT_TYPE_SFP ||
2251 type == FW_PORT_TYPE_QSFP_10G ||
2252 type == FW_PORT_TYPE_QSA) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002253 v |= SUPPORTED_FIBRE;
Hariprasad Shenai4c2d5182014-11-28 18:35:14 +05302254 if (caps & FW_PORT_CAP_SPEED_1G)
2255 v |= SUPPORTED_1000baseT_Full;
2256 if (caps & FW_PORT_CAP_SPEED_10G)
2257 v |= SUPPORTED_10000baseT_Full;
Hariprasad Shenai40e9de42014-12-12 12:07:57 +05302258 } else if (type == FW_PORT_TYPE_BP40_BA ||
2259 type == FW_PORT_TYPE_QSFP) {
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302260 v |= SUPPORTED_40000baseSR4_Full;
Hariprasad Shenai40e9de42014-12-12 12:07:57 +05302261 v |= SUPPORTED_FIBRE;
2262 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002263
2264 if (caps & FW_PORT_CAP_ANEG)
2265 v |= SUPPORTED_Autoneg;
2266 return v;
2267}
2268
2269static unsigned int to_fw_linkcaps(unsigned int caps)
2270{
2271 unsigned int v = 0;
2272
2273 if (caps & ADVERTISED_100baseT_Full)
2274 v |= FW_PORT_CAP_SPEED_100M;
2275 if (caps & ADVERTISED_1000baseT_Full)
2276 v |= FW_PORT_CAP_SPEED_1G;
2277 if (caps & ADVERTISED_10000baseT_Full)
2278 v |= FW_PORT_CAP_SPEED_10G;
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302279 if (caps & ADVERTISED_40000baseSR4_Full)
2280 v |= FW_PORT_CAP_SPEED_40G;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002281 return v;
2282}
2283
2284static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2285{
2286 const struct port_info *p = netdev_priv(dev);
2287
2288 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002289 p->port_type == FW_PORT_TYPE_BT_XFI ||
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002290 p->port_type == FW_PORT_TYPE_BT_XAUI)
2291 cmd->port = PORT_TP;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002292 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2293 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002294 cmd->port = PORT_FIBRE;
Hariprasad Shenai3e00a502014-05-07 18:01:02 +05302295 else if (p->port_type == FW_PORT_TYPE_SFP ||
2296 p->port_type == FW_PORT_TYPE_QSFP_10G ||
Hariprasad Shenai40e9de42014-12-12 12:07:57 +05302297 p->port_type == FW_PORT_TYPE_QSA ||
Hariprasad Shenai3e00a502014-05-07 18:01:02 +05302298 p->port_type == FW_PORT_TYPE_QSFP) {
2299 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2300 p->mod_type == FW_PORT_MOD_TYPE_SR ||
2301 p->mod_type == FW_PORT_MOD_TYPE_ER ||
2302 p->mod_type == FW_PORT_MOD_TYPE_LRM)
2303 cmd->port = PORT_FIBRE;
2304 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2305 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002306 cmd->port = PORT_DA;
2307 else
Hariprasad Shenai3e00a502014-05-07 18:01:02 +05302308 cmd->port = PORT_OTHER;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002309 } else
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002310 cmd->port = PORT_OTHER;
2311
2312 if (p->mdio_addr >= 0) {
2313 cmd->phy_address = p->mdio_addr;
2314 cmd->transceiver = XCVR_EXTERNAL;
2315 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2316 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2317 } else {
2318 cmd->phy_address = 0; /* not really, but no better option */
2319 cmd->transceiver = XCVR_INTERNAL;
2320 cmd->mdio_support = 0;
2321 }
2322
2323 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2324 cmd->advertising = from_fw_linkcaps(p->port_type,
2325 p->link_cfg.advertising);
David Decotigny70739492011-04-27 18:32:40 +00002326 ethtool_cmd_speed_set(cmd,
2327 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002328 cmd->duplex = DUPLEX_FULL;
2329 cmd->autoneg = p->link_cfg.autoneg;
2330 cmd->maxtxpkt = 0;
2331 cmd->maxrxpkt = 0;
2332 return 0;
2333}
2334
2335static unsigned int speed_to_caps(int speed)
2336{
Ben Hutchingse8b39012014-02-23 00:03:24 +00002337 if (speed == 100)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002338 return FW_PORT_CAP_SPEED_100M;
Ben Hutchingse8b39012014-02-23 00:03:24 +00002339 if (speed == 1000)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002340 return FW_PORT_CAP_SPEED_1G;
Ben Hutchingse8b39012014-02-23 00:03:24 +00002341 if (speed == 10000)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002342 return FW_PORT_CAP_SPEED_10G;
Ben Hutchingse8b39012014-02-23 00:03:24 +00002343 if (speed == 40000)
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302344 return FW_PORT_CAP_SPEED_40G;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002345 return 0;
2346}
2347
/*
 * ethtool set_settings handler: validate and record the requested link
 * settings (speed/duplex/autoneg) in the port's link_config and, if the
 * interface is up, push the new configuration to the firmware.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(speed);

		/* Forced speed is rejected for 1G/10G/40G even when the port
		 * supports those rates, so in practice only 100Mb/s may be
		 * forced here.  NOTE(review): presumably higher speeds must be
		 * autonegotiated on this hardware — confirm with the firmware
		 * interface documentation.
		 */
		if (!(lc->supported & cap) ||
		    (speed == 1000) ||
		    (speed == 10000) ||
		    (speed == 40000))
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		/* Autoneg: advertise only modes the port actually supports */
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	/* Apply immediately if the link is up; otherwise the settings take
	 * effect at the next link bring-up.
	 */
	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}
2393
2394static void get_pauseparam(struct net_device *dev,
2395 struct ethtool_pauseparam *epause)
2396{
2397 struct port_info *p = netdev_priv(dev);
2398
2399 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2400 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2401 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2402}
2403
2404static int set_pauseparam(struct net_device *dev,
2405 struct ethtool_pauseparam *epause)
2406{
2407 struct port_info *p = netdev_priv(dev);
2408 struct link_config *lc = &p->link_cfg;
2409
2410 if (epause->autoneg == AUTONEG_DISABLE)
2411 lc->requested_fc = 0;
2412 else if (lc->supported & FW_PORT_CAP_ANEG)
2413 lc->requested_fc = PAUSE_AUTONEG;
2414 else
2415 return -EINVAL;
2416
2417 if (epause->rx_pause)
2418 lc->requested_fc |= PAUSE_RX;
2419 if (epause->tx_pause)
2420 lc->requested_fc |= PAUSE_TX;
2421 if (netif_running(dev))
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002422 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2423 lc);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002424 return 0;
2425}
2426
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002427static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2428{
2429 const struct port_info *pi = netdev_priv(dev);
2430 const struct sge *s = &pi->adapter->sge;
2431
2432 e->rx_max_pending = MAX_RX_BUFFERS;
2433 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2434 e->rx_jumbo_max_pending = 0;
2435 e->tx_max_pending = MAX_TXQ_ENTRIES;
2436
2437 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2438 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2439 e->rx_jumbo_pending = 0;
2440 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2441}
2442
2443static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2444{
2445 int i;
2446 const struct port_info *pi = netdev_priv(dev);
2447 struct adapter *adapter = pi->adapter;
2448 struct sge *s = &adapter->sge;
2449
2450 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2451 e->tx_pending > MAX_TXQ_ENTRIES ||
2452 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2453 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2454 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2455 return -EINVAL;
2456
2457 if (adapter->flags & FULL_INIT_DONE)
2458 return -EBUSY;
2459
2460 for (i = 0; i < pi->nqsets; ++i) {
2461 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2462 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2463 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2464 }
2465 return 0;
2466}
2467
2468static int closest_timer(const struct sge *s, int time)
2469{
2470 int i, delta, match = 0, min_delta = INT_MAX;
2471
2472 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2473 delta = time - s->timer_val[i];
2474 if (delta < 0)
2475 delta = -delta;
2476 if (delta < min_delta) {
2477 min_delta = delta;
2478 match = i;
2479 }
2480 }
2481 return match;
2482}
2483
2484static int closest_thres(const struct sge *s, int thres)
2485{
2486 int i, delta, match = 0, min_delta = INT_MAX;
2487
2488 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2489 delta = thres - s->counter_val[i];
2490 if (delta < 0)
2491 delta = -delta;
2492 if (delta < min_delta) {
2493 min_delta = delta;
2494 match = i;
2495 }
2496 }
2497 return match;
2498}
2499
2500/*
2501 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2502 */
Hariprasad Shenaidc9daab2015-01-27 13:47:45 +05302503unsigned int qtimer_val(const struct adapter *adap,
2504 const struct sge_rspq *q)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002505{
2506 unsigned int idx = q->intr_params >> 1;
2507
2508 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2509}
2510
/**
 *	set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rspq_intr_params(struct sge_rspq *q,
				unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	/* If both mechanisms are disabled, fall back to a packet count of 1
	 * so the queue still interrupts.
	 */
	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		/* map the request onto the nearest adapter-wide threshold */
		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
					    &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	/* us == 0 selects timer index 6, the "no timer" encoding here;
	 * otherwise use the adapter timer closest to the request.
	 * NOTE(review): confirm the semantics of index 6 against the SGE
	 * documentation — it is not derivable from this file alone.
	 */
	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
	return 0;
}
2551
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302552/**
2553 * set_rx_intr_params - set a net devices's RX interrupt holdoff paramete!
2554 * @dev: the network device
2555 * @us: the hold-off time in us, or 0 to disable timer
2556 * @cnt: the hold-off packet count, or 0 to disable counter
2557 *
2558 * Set the RX interrupt hold-off parameters for a network device.
2559 */
2560static int set_rx_intr_params(struct net_device *dev,
2561 unsigned int us, unsigned int cnt)
2562{
2563 int i, err;
2564 struct port_info *pi = netdev_priv(dev);
2565 struct adapter *adap = pi->adapter;
2566 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2567
2568 for (i = 0; i < pi->nqsets; i++, q++) {
2569 err = set_rspq_intr_params(&q->rspq, us, cnt);
2570 if (err)
2571 return err;
2572 }
2573 return 0;
2574}
2575
Hariprasad Shenaie553ec32014-09-26 00:23:55 +05302576static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
2577{
2578 int i;
2579 struct port_info *pi = netdev_priv(dev);
2580 struct adapter *adap = pi->adapter;
2581 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2582
2583 for (i = 0; i < pi->nqsets; i++, q++)
2584 q->rspq.adaptive_rx = adaptive_rx;
2585
2586 return 0;
2587}
2588
2589static int get_adaptive_rx_setting(struct net_device *dev)
2590{
2591 struct port_info *pi = netdev_priv(dev);
2592 struct adapter *adap = pi->adapter;
2593 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2594
2595 return q->rspq.adaptive_rx;
2596}
2597
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002598static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2599{
Hariprasad Shenaie553ec32014-09-26 00:23:55 +05302600 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302601 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2602 c->rx_max_coalesced_frames);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002603}
2604
2605static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2606{
2607 const struct port_info *pi = netdev_priv(dev);
2608 const struct adapter *adap = pi->adapter;
2609 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2610
2611 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2612 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2613 adap->sge.counter_val[rq->pktcnt_idx] : 0;
Hariprasad Shenaie553ec32014-09-26 00:23:55 +05302614 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002615 return 0;
2616}
2617
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002618/**
2619 * eeprom_ptov - translate a physical EEPROM address to virtual
2620 * @phys_addr: the physical EEPROM address
2621 * @fn: the PCI function number
2622 * @sz: size of function-specific area
2623 *
2624 * Translate a physical EEPROM address to virtual. The first 1K is
2625 * accessed through virtual addresses starting at 31K, the rest is
2626 * accessed through virtual addresses starting at 0.
2627 *
2628 * The mapping is as follows:
2629 * [0..1K) -> [31K..32K)
2630 * [1K..1K+A) -> [31K-A..31K)
2631 * [1K+A..ES) -> [0..ES-A-1K)
2632 *
2633 * where A = @fn * @sz, and ES = EEPROM size.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002634 */
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002635static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002636{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002637 fn *= sz;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002638 if (phys_addr < 1024)
2639 return phys_addr + (31 << 10);
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002640 if (phys_addr < 1024 + fn)
2641 return 31744 - fn + phys_addr - 1024;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002642 if (phys_addr < EEPROMSIZE)
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002643 return phys_addr - 1024 - fn;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002644 return -EINVAL;
2645}
2646
2647/*
2648 * The next two routines implement eeprom read/write from physical addresses.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002649 */
2650static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2651{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002652 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002653
2654 if (vaddr >= 0)
2655 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2656 return vaddr < 0 ? vaddr : 0;
2657}
2658
2659static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2660{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002661 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002662
2663 if (vaddr >= 0)
2664 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2665 return vaddr < 0 ? vaddr : 0;
2666}
2667
2668#define EEPROM_MAGIC 0x38E2F10C
2669
2670static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2671 u8 *data)
2672{
2673 int i, err = 0;
2674 struct adapter *adapter = netdev2adap(dev);
2675
2676 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2677 if (!buf)
2678 return -ENOMEM;
2679
2680 e->magic = EEPROM_MAGIC;
2681 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2682 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2683
2684 if (!err)
2685 memcpy(data, buf + e->offset, e->len);
2686 kfree(buf);
2687 return err;
2688}
2689
/* ethtool set_eeprom handler: write @eeprom->len bytes at @eeprom->offset.
 * The EEPROM is written in aligned 32-bit words, so an unaligned request
 * is handled by read-modify-write of the first/last words.  Non-primary
 * PCI functions may only touch their own EEPROMPFSIZE-sized area.
 */
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* Round the request out to whole 32-bit words. */
	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	/* PFs other than PF0 are confined to their private slice, which
	 * starts after the shared first 1K of the EEPROM.
	 */
	if (adapter->fn > 0) {
		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* Pre-read the first word and, if the span covers more than
		 * one word, the last word so the partial bytes are preserved.
		 */
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;	/* already aligned, write caller's buffer */

	/* Disable write protection for the duration of the update. */
	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	/* Re-arm write protection even if some writes failed. */
	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
2746
2747static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2748{
2749 int ret;
2750 const struct firmware *fw;
2751 struct adapter *adap = netdev2adap(netdev);
Hariprasad Shenaib2e1a3f2014-11-21 12:52:05 +05302752 unsigned int mbox = PCIE_FW_MASTER_M + 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002753
2754 ef->data[sizeof(ef->data) - 1] = '\0';
2755 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2756 if (ret < 0)
2757 return ret;
2758
Hariprasad Shenai22c0b962014-10-15 01:54:14 +05302759 /* If the adapter has been fully initialized then we'll go ahead and
2760 * try to get the firmware's cooperation in upgrading to the new
2761 * firmware image otherwise we'll try to do the entire job from the
2762 * host ... and we always "force" the operation in this path.
2763 */
2764 if (adap->flags & FULL_INIT_DONE)
2765 mbox = adap->mbox;
2766
2767 ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002768 release_firmware(fw);
2769 if (!ret)
Hariprasad Shenai22c0b962014-10-15 01:54:14 +05302770 dev_info(adap->pdev_dev, "loaded firmware %s,"
2771 " reload cxgb4 driver\n", ef->data);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002772 return ret;
2773}
2774
2775#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2776#define BCAST_CRC 0xa0ccc1a6
2777
2778static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2779{
2780 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2781 wol->wolopts = netdev2adap(dev)->wol;
2782 memset(&wol->sopass, 0, sizeof(wol->sopass));
2783}
2784
/* ethtool set_wol handler: enable/disable magic-packet and broadcast
 * Wake-on-LAN for this port.  Rejects any option outside WOL_SUPPORTED.
 */
static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	/* Passing NULL disables magic-packet wake-up. */
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		/* Two pattern-match rules implement broadcast wake-up; the
		 * mask/value/CRC arguments are hardware pattern descriptors —
		 * NOTE(review): see t4_wol_pat_enable() for their semantics.
		 */
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		/* Best-effort disable of pattern matching; errors ignored. */
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}
2804
/* ndo_set_features handler.  The only offload toggled through the hardware
 * here is VLAN RX tag stripping; every other feature change needs no
 * device programming and succeeds trivially.
 */
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	/* -1 arguments leave the corresponding rxmode fields unchanged. */
	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		/* Hardware update failed: flip the VLAN-RX bit back so
		 * dev->features continues to reflect the device state.
		 */
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
2821
Ben Hutchings7850f632011-12-15 13:55:01 +00002822static u32 get_rss_table_size(struct net_device *dev)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002823{
2824 const struct port_info *pi = netdev_priv(dev);
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002825
Ben Hutchings7850f632011-12-15 13:55:01 +00002826 return pi->rss_size;
2827}
2828
Eyal Perry892311f2014-12-02 18:12:10 +02002829static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
Ben Hutchings7850f632011-12-15 13:55:01 +00002830{
2831 const struct port_info *pi = netdev_priv(dev);
2832 unsigned int n = pi->rss_size;
2833
Eyal Perry892311f2014-12-02 18:12:10 +02002834 if (hfunc)
2835 *hfunc = ETH_RSS_HASH_TOP;
2836 if (!p)
2837 return 0;
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002838 while (n--)
Ben Hutchings7850f632011-12-15 13:55:01 +00002839 p[n] = pi->rss[n];
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002840 return 0;
2841}
2842
Eyal Perry892311f2014-12-02 18:12:10 +02002843static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
2844 const u8 hfunc)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002845{
2846 unsigned int i;
2847 struct port_info *pi = netdev_priv(dev);
2848
Eyal Perry892311f2014-12-02 18:12:10 +02002849 /* We require at least one supported parameter to be changed and no
2850 * change in any of the unsupported parameters
2851 */
2852 if (key ||
2853 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
2854 return -EOPNOTSUPP;
2855 if (!p)
2856 return 0;
2857
Ben Hutchings7850f632011-12-15 13:55:01 +00002858 for (i = 0; i < pi->rss_size; i++)
2859 pi->rss[i] = p[i];
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002860 if (pi->adapter->flags & FULL_INIT_DONE)
2861 return write_rss(pi, pi->rss);
2862 return 0;
2863}
2864
/* ethtool get_rxnfc handler.  Supports:
 *  - ETHTOOL_GRXFH: report which header fields feed the RX flow hash for a
 *    given flow type, derived from the VI's RSS mode flags (4-tuple hashing
 *    implies src/dst IP plus L4 ports; 2-tuple implies src/dst IP only;
 *    UDP additionally requires the UDPEN flag).
 *  - ETHTOOL_GRXRINGS: report the number of RX rings.
 */
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			/* UDP 4-tuple hashing needs its own enable bit. */
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			/* Non-TCP/UDP IPv4 traffic hashes on addresses only. */
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}
2927
/* ethtool operations table for cxgb4 network devices; hooked up to each
 * net_device at registration time.
 */
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.set_eeprom        = set_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.set_phys_id       = identify_port,
	.nway_reset        = restart_autoneg,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_wol           = get_wol,
	.set_wol           = set_wol,
	.get_rxnfc         = get_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh	   = get_rss_table,
	.set_rxfh	   = set_rss_table,
	.flash_device      = set_flash,
};
2959
/* Populate the adapter's debugfs directory.  Returns -1 if the root
 * directory was never created (e.g. debugfs unavailable), 0 otherwise.
 */
static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}
2970
2971/*
2972 * upper-layer driver support
2973 */
2974
2975/*
2976 * Allocate an active-open TID and set it to the supplied value.
2977 */
2978int cxgb4_alloc_atid(struct tid_info *t, void *data)
2979{
2980 int atid = -1;
2981
2982 spin_lock_bh(&t->atid_lock);
2983 if (t->afree) {
2984 union aopen_entry *p = t->afree;
2985
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002986 atid = (p - t->atid_tab) + t->atid_base;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002987 t->afree = p->next;
2988 p->data = data;
2989 t->atids_in_use++;
2990 }
2991 spin_unlock_bh(&t->atid_lock);
2992 return atid;
2993}
2994EXPORT_SYMBOL(cxgb4_alloc_atid);
2995
2996/*
2997 * Release an active-open TID.
2998 */
2999void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3000{
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003001 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003002
3003 spin_lock_bh(&t->atid_lock);
3004 p->next = t->afree;
3005 t->afree = p;
3006 t->atids_in_use--;
3007 spin_unlock_bh(&t->atid_lock);
3008}
3009EXPORT_SYMBOL(cxgb4_free_atid);
3010
/*
 * Allocate a server TID and set it to the supplied value.  IPv4 servers
 * take a single bitmap slot; IPv6 servers need a naturally-aligned region
 * of 4 consecutive slots (bitmap_find_free_region order 2).  Returns the
 * TID offset by stid_base, or -1 if none is available.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET)
			t->stids_in_use++;
		else
			t->stids_in_use += 4;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
3046
/* Allocate a server filter TID and set it to the supplied value.  Server
 * filter TIDs live in the bitmap region above the regular server TIDs
 * ([nstids, nstids + nsftids)) and are only supported for IPv4.  Returns
 * the TID rebased onto sftid_base, or -1 on failure.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		/* IPv6 server filters are not supported. */
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		/* Convert the bitmap index to an sftid-space TID. */
		stid -= t->nstids;
		stid += t->sftid_base;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);
3074
/* Release a server TID.  Handles both regular server TIDs and server
 * filter TIDs by first converting @stid back to an index into the shared
 * stid bitmap/table.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		/* IPv6 servers occupy an order-2 region of 4 slots. */
		bitmap_release_region(t->stid_bmap, stid, 2);
	t->stid_tab[stid].data = NULL;
	/* Mirror the accounting done in cxgb4_alloc_stid(). */
	if (family == PF_INET)
		t->stids_in_use--;
	else
		t->stids_in_use -= 4;
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
3100
/*
 * Populate a TID_RELEASE WR. Caller must properly size the skb.
 * The CPL_TID_RELEASE message tells the hardware the TID is free again;
 * the skb is steered to the setup queue of Tx channel @chan.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
3114
/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.  The pending requests form a singly-linked list threaded
 * through the tid_tab slots themselves; since those pointers are at least
 * 4-byte aligned, the Tx channel is smuggled in the low 2 bits of each
 * link (decoded again in process_tid_release_list()).
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003135
/*
 * Process the list of pending TID release requests.  Runs from the
 * adapter workqueue; drops the list lock around the (possibly sleeping)
 * skb allocation and send, re-taking it before examining the next entry.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		/* Recover the Tx channel tagged into the low 2 bits. */
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		/* Must not fail permanently: retry the allocation forever,
		 * sleeping a tick between attempts.
		 */
		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		/* The slot address encodes the TID being released. */
		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
3167
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue (cxgb4_queue_tid_release), which reuses
 * the tid_tab slot as a list link — hence the slot is only NULLed here on
 * the immediate-send path.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	void *old;
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	/* Snapshot the slot before it is cleared/overwritten so the
	 * in-use accounting below sees whether the TID was populated.
	 */
	old = t->tid_tab[tid];
	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		t->tid_tab[tid] = NULL;
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
	if (old)
		atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
3190
/*
 * Allocate and initialize the TID tables. Returns 0 on success.
 * All tables (tid_tab, atid_tab, stid_tab + sftid slots, stid bitmap and
 * the filter table) are carved out of one contiguous allocation anchored
 * at tid_tab; the pointer arithmetic below must mirror the size
 * computation exactly.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int stid_bmap_size;
	unsigned int natids = t->natids;
	struct adapter *adap = container_of(t, struct adapter, tids);

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nftids * sizeof(*t->ftid_tab) +
	       t->nsftids * sizeof(*t->ftid_tab);

	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	/* Sub-table pointers into the single allocation, in layout order. */
	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
	/* Reserve stid 0 for T4/T5 adapters */
	if (!t->stid_base &&
	    (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
		__set_bit(0, t->stid_bmap);

	return 0;
}
3240
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: VLAN id (currently unused by this function)
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);	/* wildcard peer */
	req->local_ip = sip;
	req->peer_ip = htonl(0);	/* wildcard peer */
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	/* Steer SYNs for this server to @queue via RSS. */
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
3282
/* cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);	/* wildcard peer */
	/* The 128-bit address is split into two big-endian 64-bit halves. */
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	/* Steer SYNs for this server to @queue via RSS. */
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
3325
/* Close a listening server previously created with cxgb4_create_server()
 * or cxgb4_create_server6().  The close-reply is delivered on @queue.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
3349
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003350/**
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003351 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3352 * @mtus: the HW MTU table
3353 * @mtu: the target MTU
3354 * @idx: index of selected entry in the MTU table
3355 *
3356 * Returns the index and the value in the HW MTU table that is closest to
3357 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3358 * table, in which case that smallest available value is selected.
3359 */
3360unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3361 unsigned int *idx)
3362{
3363 unsigned int i = 0;
3364
3365 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3366 ++i;
3367 if (idx)
3368 *idx = i;
3369 return mtus[i];
3370}
3371EXPORT_SYMBOL(cxgb4_best_mtu);
3372
/**
 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 * @mtus: the HW MTU table
 * @header_size: Header Size
 * @data_size_max: maximum Data Segment Size
 * @data_size_align: desired Data Segment Size Alignment (2^N)
 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
 * MTU Table based solely on a Maximum MTU parameter, we break that
 * parameter up into a Header Size and Maximum Data Segment Size, and
 * provide a desired Data Segment Size Alignment. If we find an MTU in
 * the Hardware MTU Table which will result in a Data Segment Size with
 * the requested alignment _and_ that MTU isn't "too far" from the
 * closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	/* data_size_align is a power of two, so this is the usual mask. */
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table. Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 * ("Not far" here means within one table index.)
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back. Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
3443
3444/**
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003445 * cxgb4_port_chan - get the HW channel of a port
3446 * @dev: the net device for the port
3447 *
3448 * Return the HW Tx channel of the given port.
3449 */
3450unsigned int cxgb4_port_chan(const struct net_device *dev)
3451{
3452 return netdev2pinfo(dev)->tx_chan;
3453}
3454EXPORT_SYMBOL(cxgb4_port_chan);
3455
Vipul Pandya881806b2012-05-18 15:29:24 +05303456unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3457{
3458 struct adapter *adap = netdev2adap(dev);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003459 u32 v1, v2, lp_count, hp_count;
Vipul Pandya881806b2012-05-18 15:29:24 +05303460
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303461 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
3462 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303463 if (is_t4(adap->params.chip)) {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303464 lp_count = LP_COUNT_G(v1);
3465 hp_count = HP_COUNT_G(v1);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003466 } else {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303467 lp_count = LP_COUNT_T5_G(v1);
3468 hp_count = HP_COUNT_T5_G(v2);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003469 }
3470 return lpfifo ? lp_count : hp_count;
Vipul Pandya881806b2012-05-18 15:29:24 +05303471}
3472EXPORT_SYMBOL(cxgb4_dbfifo_count);
3473
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003474/**
3475 * cxgb4_port_viid - get the VI id of a port
3476 * @dev: the net device for the port
3477 *
3478 * Return the VI id of the given port.
3479 */
3480unsigned int cxgb4_port_viid(const struct net_device *dev)
3481{
3482 return netdev2pinfo(dev)->viid;
3483}
3484EXPORT_SYMBOL(cxgb4_port_viid);
3485
3486/**
3487 * cxgb4_port_idx - get the index of a port
3488 * @dev: the net device for the port
3489 *
3490 * Return the index of the given port.
3491 */
3492unsigned int cxgb4_port_idx(const struct net_device *dev)
3493{
3494 return netdev2pinfo(dev)->port_id;
3495}
3496EXPORT_SYMBOL(cxgb4_port_idx);
3497
/* Snapshot the adapter's TP TCP statistics into @v4/@v6 under the
 * adapter's stats lock.  Exported for the offload drivers.
 */
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	/* serialize against other readers of the shared stats registers */
	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3508
/* Program the ULP_RX iSCSI tag mask and the four host page-size orders
 * (pgsz_order[0..3]) for the adapter owning @dev.  Called by the iSCSI ULD.
 */
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
3520
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303521int cxgb4_flush_eq_cache(struct net_device *dev)
3522{
3523 struct adapter *adap = netdev2adap(dev);
3524 int ret;
3525
3526 ret = t4_fwaddrspace_write(adap, adap->mbox,
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303527 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303528 return ret;
3529}
3530EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3531
/* Read the hardware producer/consumer indices of egress queue @qid from
 * its queue context in EDC0.  On success (return 0) *pidx and *cidx are
 * filled in.  Each DBQ context is 24 bytes; the word holding the indices
 * sits at byte offset 8 within it.
 */
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	/* memory window 0 is shared; serialize access to it */
	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		/* cidx and pidx are 16-bit fields at bit offsets 25 and 9 */
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}
3549
/* Resynchronize the hardware producer index of TX queue @qid with the
 * software value @pidx (e.g. after doorbell-drop recovery).  @size is the
 * queue size in descriptors, used to compute the wrapped delta.
 */
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;	/* index wrapped */

		/* the PIDX increment is encoded differently on T4 vs T5+ */
		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();	/* commit descriptor writes before ringing doorbell */
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3582
Vipul Pandya3cbdb922013-03-14 05:08:59 +00003583void cxgb4_disable_db_coalescing(struct net_device *dev)
3584{
3585 struct adapter *adap;
3586
3587 adap = netdev2adap(dev);
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303588 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303589 NOCOALESCE_F);
Vipul Pandya3cbdb922013-03-14 05:08:59 +00003590}
3591EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3592
3593void cxgb4_enable_db_coalescing(struct net_device *dev)
3594{
3595 struct adapter *adap;
3596
3597 adap = netdev2adap(dev);
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303598 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
Vipul Pandya3cbdb922013-03-14 05:08:59 +00003599}
3600EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3601
Hariprasad Shenai031cf472014-07-14 21:34:53 +05303602int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3603{
3604 struct adapter *adap;
3605 u32 offset, memtype, memaddr;
Hariprasad Shenai6559a7e2014-11-07 09:35:24 +05303606 u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
Hariprasad Shenai031cf472014-07-14 21:34:53 +05303607 u32 edc0_end, edc1_end, mc0_end, mc1_end;
3608 int ret;
3609
3610 adap = netdev2adap(dev);
3611
3612 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
3613
3614 /* Figure out where the offset lands in the Memory Type/Address scheme.
3615 * This code assumes that the memory is laid out starting at offset 0
3616 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3617 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
3618 * MC0, and some have both MC0 and MC1.
3619 */
Hariprasad Shenai6559a7e2014-11-07 09:35:24 +05303620 size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
3621 edc0_size = EDRAM0_SIZE_G(size) << 20;
3622 size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
3623 edc1_size = EDRAM1_SIZE_G(size) << 20;
3624 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
3625 mc0_size = EXT_MEM0_SIZE_G(size) << 20;
Hariprasad Shenai031cf472014-07-14 21:34:53 +05303626
3627 edc0_end = edc0_size;
3628 edc1_end = edc0_end + edc1_size;
3629 mc0_end = edc1_end + mc0_size;
3630
3631 if (offset < edc0_end) {
3632 memtype = MEM_EDC0;
3633 memaddr = offset;
3634 } else if (offset < edc1_end) {
3635 memtype = MEM_EDC1;
3636 memaddr = offset - edc0_end;
3637 } else {
3638 if (offset < mc0_end) {
3639 memtype = MEM_MC0;
3640 memaddr = offset - edc1_end;
3641 } else if (is_t4(adap->params.chip)) {
3642 /* T4 only has a single memory channel */
3643 goto err;
3644 } else {
Hariprasad Shenai6559a7e2014-11-07 09:35:24 +05303645 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
3646 mc1_size = EXT_MEM1_SIZE_G(size) << 20;
Hariprasad Shenai031cf472014-07-14 21:34:53 +05303647 mc1_end = mc0_end + mc1_size;
3648 if (offset < mc1_end) {
3649 memtype = MEM_MC1;
3650 memaddr = offset - mc0_end;
3651 } else {
3652 /* offset beyond the end of any memory */
3653 goto err;
3654 }
3655 }
3656 }
3657
3658 spin_lock(&adap->win0_lock);
3659 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3660 spin_unlock(&adap->win0_lock);
3661 return ret;
3662
3663err:
3664 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3665 stag, offset);
3666 return -EINVAL;
3667}
3668EXPORT_SYMBOL(cxgb4_read_tpte);
3669
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +05303670u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3671{
3672 u32 hi, lo;
3673 struct adapter *adap;
3674
3675 adap = netdev2adap(dev);
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303676 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
3677 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +05303678
3679 return ((u64)hi << 32) | (u64)lo;
3680}
3681EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3682
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05303683int cxgb4_bar2_sge_qregs(struct net_device *dev,
3684 unsigned int qid,
3685 enum cxgb4_bar2_qtype qtype,
3686 u64 *pbar2_qoffset,
3687 unsigned int *pbar2_qid)
3688{
Stephen Rothwelldd0bcc02014-12-10 19:48:02 +11003689 return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05303690 qid,
3691 (qtype == CXGB4_BAR2_QTYPE_EGRESS
3692 ? T4_BAR2_QTYPE_EGRESS
3693 : T4_BAR2_QTYPE_INGRESS),
3694 pbar2_qoffset,
3695 pbar2_qid);
3696}
3697EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
3698
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003699static struct pci_driver cxgb4_driver;
3700
3701static void check_neigh_update(struct neighbour *neigh)
3702{
3703 const struct device *parent;
3704 const struct net_device *netdev = neigh->dev;
3705
3706 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3707 netdev = vlan_dev_real_dev(netdev);
3708 parent = netdev->dev.parent;
3709 if (parent && parent->driver == &cxgb4_driver.driver)
3710 t4_l2t_update(dev_get_drvdata(parent), neigh);
3711}
3712
3713static int netevent_cb(struct notifier_block *nb, unsigned long event,
3714 void *data)
3715{
3716 switch (event) {
3717 case NETEVENT_NEIGH_UPDATE:
3718 check_neigh_update(data);
3719 break;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003720 case NETEVENT_REDIRECT:
3721 default:
3722 break;
3723 }
3724 return 0;
3725}
3726
/* Set when the netevent notifier below is registered (done on the first
 * ULD attach in uld_attach(), cleared in detach_ulds() when the last
 * adapter goes away).
 */
static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
3731
/* Poll until both the low- and high-priority doorbell FIFOs are empty,
 * sleeping @usecs between polls.
 */
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		/* occupancy fields are laid out differently on T4 vs T5+ */
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}
3753
/* Mark a TX queue's doorbells disabled; IRQ-safe locking since the TX
 * path may take db_lock from interrupt context.
 */
static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}
3762
/* Re-enable a TX queue's doorbells, first ringing any producer-index
 * increments that accumulated (db_pidx_inc) while doorbells were off.
 */
static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
3778
/* Disable doorbells on every Ethernet, offload and control TX queue. */
static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}
3790
/* Re-enable doorbells on every Ethernet, offload and control TX queue. */
static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
3802
3803static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3804{
3805 if (adap->uld_handle[CXGB4_ULD_RDMA])
3806 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3807 cmd);
3808}
3809
/* Work-queue handler for a doorbell-FIFO-full event: wait for the FIFOs
 * to drain, re-enable queue doorbells, tell the RDMA ULD the FIFO is
 * empty again and re-arm the doorbell FIFO interrupts.
 */
static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
			 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
			 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
}
3823
/* Doorbell-drop recovery for one TX queue: bring the hardware producer
 * index back in sync with the driver's db_pidx and re-enable the queue's
 * doorbells, all under the queue's db_lock.
 */
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx; /* wrapped */

		/* the PIDX increment is encoded differently on T4 vs T5+ */
		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();	/* descriptors visible before the doorbell write */
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}
/* Run doorbell-drop recovery on every Ethernet, offload and control
 * TX queue.
 */
static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
3868
/* Work-queue handler for a dropped doorbell.  On T4 we drain the FIFOs
 * and replay the producer indices of every queue; on T5+ the hardware
 * latches which queue lost a doorbell and by how much, so we re-issue
 * just that increment through the queue's BAR2 doorbell.
 */
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else {
		/* 0x010ac latches the dropped doorbell's queue id and the
		 * lost PIDX increment
		 */
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					      &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
3906
/* Called when the hardware reports the doorbell FIFO is full.  Only T4
 * needs the drain/recover dance: disable doorbells, warn the RDMA ULD,
 * mask the FIFO interrupts and defer the rest to the workqueue.
 */
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}
3917
/* Called when the hardware reports a dropped doorbell.  Recovery itself
 * runs from the workqueue; on T4 we additionally disable doorbells and
 * warn the RDMA ULD first.
 */
void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}
3926
/* Describe @adap in a cxgb4_lld_info and hand it to ULD @uld's add()
 * method.  On success the opaque handle is stored in adap->uld_handle[],
 * the netevent notifier is registered on first use, and the ULD is told
 * the adapter is up if initialization already completed.  On failure we
 * log a warning and carry on without the ULD.
 */
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;
	unsigned short i;

	lli.pdev = adap->pdev;
	lli.pf = adap->fn;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	/* RDMA and iSCSI use different sets of ingress queues */
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.ciq_ids = adap->sge.rdma_ciq;
		lli.nrxq = adap->sge.rdmaqs;
		lli.nciq = adap->sge.rdmaciqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.ofld_rxq;
		lli.nrxq = adap->sge.ofldqsets;
	}
	lli.ntxq = adap->sge.ofldqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.chip;
	lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lli.udb_density = 1 << adap->params.sge.eq_qpp;
	lli.ucq_density = 1 << adap->params.sge.iq_qpp;
	lli.filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lli.tx_modq[i] = i;
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lli.fw_vers = adap->params.fw_vers;
	lli.dbfifo_int_thresh = dbfifo_int_thresh;
	lli.sge_ingpadboundary = adap->sge.fl_align;
	lli.sge_egrstatuspagesize = adap->sge.stat_len;
	lli.sge_pktshift = adap->sge.pktshift;
	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lli.max_ordird_qp = adap->params.max_ordird_qp;
	lli.max_ird_adapter = adap->params.max_ird_adapter;
	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}
3992
/* Add @adap to the RCU and ULD adapter lists and attach every currently
 * registered ULD to it.
 */
static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	spin_lock(&adap_rcu_lock);
	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
	spin_unlock(&adap_rcu_lock);

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}
4008
/* Undo attach_ulds(): notify each attached ULD of the detach, clear its
 * handle, drop the adapter from both lists, and unregister the netevent
 * notifier once the last adapter is gone.
 */
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);

	spin_lock(&adap_rcu_lock);
	list_del_rcu(&adap->rcu_node);
	spin_unlock(&adap_rcu_lock);
}
4031
/* Broadcast an adapter state change to every attached ULD. */
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}
4042
4043/**
4044 * cxgb4_register_uld - register an upper-layer driver
4045 * @type: the ULD type
4046 * @p: the ULD methods
4047 *
4048 * Registers an upper-layer driver with this driver and notifies the ULD
4049 * about any presently available devices that support its type. Returns
4050 * %-EBUSY if a ULD of the same type is already registered.
4051 */
4052int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4053{
4054 int ret = 0;
4055 struct adapter *adap;
4056
4057 if (type >= CXGB4_ULD_MAX)
4058 return -EINVAL;
4059 mutex_lock(&uld_mutex);
4060 if (ulds[type].add) {
4061 ret = -EBUSY;
4062 goto out;
4063 }
4064 ulds[type] = *p;
4065 list_for_each_entry(adap, &adapter_list, list_node)
4066 uld_attach(adap, type);
4067out: mutex_unlock(&uld_mutex);
4068 return ret;
4069}
4070EXPORT_SYMBOL(cxgb4_register_uld);
4071
/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	/* drop every adapter's handle for this ULD, then clear its add()
	 * method so uld_attach() skips it from now on
	 */
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
4092
#if IS_ENABLED(CONFIG_IPV6)
/* inet6addr notifier: mirror IPv6 address add/remove events into the
 * adapter CLIP table for devices owned by this driver, unwrapping VLAN
 * upper devices and (when bonding is enabled) fanning bond-master events
 * out to every adapter.
 */
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		/* bond master: apply the CLIP update via port 0 of every
		 * adapter, since the slave membership isn't known here
		 */
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	/* only act on net devices that belong to this driver */
	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}

/* Tracks whether the notifier below has been registered. */
static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};
4147
/* Re-sync the CLIP table for every port of @adap; stops at the first
 * port whose update fails.
 */
static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
Vipul Pandya01bcca62013-07-04 16:10:46 +05304169
/**
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		/* MSI-X vector 0 handles non-data interrupts */
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		/* single MSI or shared INTx interrupt for everything */
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}
4226
/* Reverse of cxgb_up(): disable interrupts, flush pending work, release
 * the IRQs, and quiesce and free the SGE queues.
 */
static void cxgb_down(struct adapter *adapter)
{
	t4_intr_disable(adapter);
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	if (adapter->flags & USING_MSIX) {
		free_msix_queue_irqs(adapter);
		free_irq(adapter->msix_info[0].vec, adapter);
	} else
		free_irq(adapter->pdev->irq, adapter);
	quiesce_rx(adapter);
	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}
4246
4247/*
4248 * net_device operations
4249 */
4250static int cxgb_open(struct net_device *dev)
4251{
4252 int err;
4253 struct port_info *pi = netdev_priv(dev);
4254 struct adapter *adapter = pi->adapter;
4255
Dimitris Michailidis6a3c8692011-01-19 15:29:05 +00004256 netif_carrier_off(dev);
4257
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00004258 if (!(adapter->flags & FULL_INIT_DONE)) {
4259 err = cxgb_up(adapter);
4260 if (err < 0)
4261 return err;
4262 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004263
Dimitris Michailidisf68707b2010-06-18 10:05:32 +00004264 err = link_start(dev);
4265 if (!err)
4266 netif_tx_start_all_queues(dev);
4267 return err;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004268}
4269
4270static int cxgb_close(struct net_device *dev)
4271{
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004272 struct port_info *pi = netdev_priv(dev);
4273 struct adapter *adapter = pi->adapter;
4274
4275 netif_tx_stop_all_queues(dev);
4276 netif_carrier_off(dev);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004277 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004278}
4279
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00004280/* Return an error number if the indicated filter isn't writable ...
4281 */
4282static int writable_filter(struct filter_entry *f)
4283{
4284 if (f->locked)
4285 return -EPERM;
4286 if (f->pending)
4287 return -EBUSY;
4288
4289 return 0;
4290}
4291
4292/* Delete the filter at the specified index (if valid). The checks for all
4293 * the common problems with doing this like the filter being locked, currently
4294 * pending in another operation, etc.
4295 */
4296static int delete_filter(struct adapter *adapter, unsigned int fidx)
4297{
4298 struct filter_entry *f;
4299 int ret;
4300
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004301 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00004302 return -EINVAL;
4303
4304 f = &adapter->tids.ftid_tab[fidx];
4305 ret = writable_filter(f);
4306 if (ret)
4307 return ret;
4308 if (f->valid)
4309 return del_filter_wr(adapter, fidx);
4310
4311 return 0;
4312}
4313
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004314int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
Vipul Pandya793dad92012-12-10 09:30:56 +00004315 __be32 sip, __be16 sport, __be16 vlan,
4316 unsigned int queue, unsigned char port, unsigned char mask)
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004317{
4318 int ret;
4319 struct filter_entry *f;
4320 struct adapter *adap;
4321 int i;
4322 u8 *val;
4323
4324 adap = netdev2adap(dev);
4325
Vipul Pandya1cab7752012-12-10 09:30:55 +00004326 /* Adjust stid to correct filter index */
Kumar Sanghvi470c60c2013-12-18 16:38:21 +05304327 stid -= adap->tids.sftid_base;
Vipul Pandya1cab7752012-12-10 09:30:55 +00004328 stid += adap->tids.nftids;
4329
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004330 /* Check to make sure the filter requested is writable ...
4331 */
4332 f = &adap->tids.ftid_tab[stid];
4333 ret = writable_filter(f);
4334 if (ret)
4335 return ret;
4336
4337 /* Clear out any old resources being used by the filter before
4338 * we start constructing the new filter.
4339 */
4340 if (f->valid)
4341 clear_filter(adap, f);
4342
4343 /* Clear out filter specifications */
4344 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4345 f->fs.val.lport = cpu_to_be16(sport);
4346 f->fs.mask.lport = ~0;
4347 val = (u8 *)&sip;
Vipul Pandya793dad92012-12-10 09:30:56 +00004348 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004349 for (i = 0; i < 4; i++) {
4350 f->fs.val.lip[i] = val[i];
4351 f->fs.mask.lip[i] = ~0;
4352 }
Hariprasad Shenai0d804332015-01-05 16:30:47 +05304353 if (adap->params.tp.vlan_pri_map & PORT_F) {
Vipul Pandya793dad92012-12-10 09:30:56 +00004354 f->fs.val.iport = port;
4355 f->fs.mask.iport = mask;
4356 }
4357 }
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004358
Hariprasad Shenai0d804332015-01-05 16:30:47 +05304359 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
Kumar Sanghvi7c89e552013-12-18 16:38:20 +05304360 f->fs.val.proto = IPPROTO_TCP;
4361 f->fs.mask.proto = ~0;
4362 }
4363
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004364 f->fs.dirsteer = 1;
4365 f->fs.iq = queue;
4366 /* Mark filter as locked */
4367 f->locked = 1;
4368 f->fs.rpttid = 1;
4369
4370 ret = set_filter_wr(adap, stid);
4371 if (ret) {
4372 clear_filter(adap, f);
4373 return ret;
4374 }
4375
4376 return 0;
4377}
4378EXPORT_SYMBOL(cxgb4_create_server_filter);
4379
4380int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4381 unsigned int queue, bool ipv6)
4382{
4383 int ret;
4384 struct filter_entry *f;
4385 struct adapter *adap;
4386
4387 adap = netdev2adap(dev);
Vipul Pandya1cab7752012-12-10 09:30:55 +00004388
4389 /* Adjust stid to correct filter index */
Kumar Sanghvi470c60c2013-12-18 16:38:21 +05304390 stid -= adap->tids.sftid_base;
Vipul Pandya1cab7752012-12-10 09:30:55 +00004391 stid += adap->tids.nftids;
4392
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004393 f = &adap->tids.ftid_tab[stid];
4394 /* Unlock the filter */
4395 f->locked = 0;
4396
4397 ret = delete_filter(adap, stid);
4398 if (ret)
4399 return ret;
4400
4401 return 0;
4402}
4403EXPORT_SYMBOL(cxgb4_remove_server_filter);
4404
Dimitris Michailidisf5152c92010-07-07 16:11:25 +00004405static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4406 struct rtnl_link_stats64 *ns)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004407{
4408 struct port_stats stats;
4409 struct port_info *p = netdev_priv(dev);
4410 struct adapter *adapter = p->adapter;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004411
Gavin Shan9fe6cb52014-01-23 12:27:35 +08004412 /* Block retrieving statistics during EEH error
4413 * recovery. Otherwise, the recovery might fail
4414 * and the PCI device will be removed permanently
4415 */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004416 spin_lock(&adapter->stats_lock);
Gavin Shan9fe6cb52014-01-23 12:27:35 +08004417 if (!netif_device_present(dev)) {
4418 spin_unlock(&adapter->stats_lock);
4419 return ns;
4420 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004421 t4_get_port_stats(adapter, p->tx_chan, &stats);
4422 spin_unlock(&adapter->stats_lock);
4423
4424 ns->tx_bytes = stats.tx_octets;
4425 ns->tx_packets = stats.tx_frames;
4426 ns->rx_bytes = stats.rx_octets;
4427 ns->rx_packets = stats.rx_frames;
4428 ns->multicast = stats.rx_mcast_frames;
4429
4430 /* detailed rx_errors */
4431 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4432 stats.rx_runt;
4433 ns->rx_over_errors = 0;
4434 ns->rx_crc_errors = stats.rx_fcs_err;
4435 ns->rx_frame_errors = stats.rx_symbol_err;
4436 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4437 stats.rx_ovflow2 + stats.rx_ovflow3 +
4438 stats.rx_trunc0 + stats.rx_trunc1 +
4439 stats.rx_trunc2 + stats.rx_trunc3;
4440 ns->rx_missed_errors = 0;
4441
4442 /* detailed tx_errors */
4443 ns->tx_aborted_errors = 0;
4444 ns->tx_carrier_errors = 0;
4445 ns->tx_fifo_errors = 0;
4446 ns->tx_heartbeat_errors = 0;
4447 ns->tx_window_errors = 0;
4448
4449 ns->tx_errors = stats.tx_error_frames;
4450 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4451 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4452 return ns;
4453}
4454
/* ndo_do_ioctl handler.  Supports the MII ioctls: SIOCGMIIPHY reports the
 * port's MDIO address, SIOCGMIIREG/SIOCSMIIREG read/write a PHY register
 * through the firmware mailbox, using either Clause 45 or Clause 22
 * addressing depending on how phy_id is encoded.
 */
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;	/* port has no MDIO address */
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			/* Clause 45: phy_id encodes both the port address
			 * and the MMD device address.
			 */
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			/* Clause 22: 5-bit PHY address, 5-bit register */
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		/* MDIO accesses go through this function's mailbox */
		mbox = pi->adapter->fn;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
4493
4494static void cxgb_set_rxmode(struct net_device *dev)
4495{
4496 /* unfortunately we can't return errors to the stack */
4497 set_rxmode(dev, -1, false);
4498}
4499
4500static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4501{
4502 int ret;
4503 struct port_info *pi = netdev_priv(dev);
4504
4505 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4506 return -EINVAL;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004507 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4508 -1, -1, -1, true);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004509 if (!ret)
4510 dev->mtu = new_mtu;
4511 return ret;
4512}
4513
4514static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4515{
4516 int ret;
4517 struct sockaddr *addr = p;
4518 struct port_info *pi = netdev_priv(dev);
4519
4520 if (!is_valid_ether_addr(addr->sa_data))
Danny Kukawka504f9b52012-02-21 02:07:49 +00004521 return -EADDRNOTAVAIL;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004522
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004523 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4524 pi->xact_addr_filt, addr->sa_data, true, true);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004525 if (ret < 0)
4526 return ret;
4527
4528 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4529 pi->xact_addr_filt = ret;
4530 return 0;
4531}
4532
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004533#ifdef CONFIG_NET_POLL_CONTROLLER
4534static void cxgb_netpoll(struct net_device *dev)
4535{
4536 struct port_info *pi = netdev_priv(dev);
4537 struct adapter *adap = pi->adapter;
4538
4539 if (adap->flags & USING_MSIX) {
4540 int i;
4541 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4542
4543 for (i = pi->nqsets; i; i--, rx++)
4544 t4_sge_intr_msix(0, &rx->rspq);
4545 } else
4546 t4_intr_handler(adap)(0, adap);
4547}
4548#endif
4549
/* net_device callback table shared by all cxgb4 ports; attached to each
 * port's netdev during probe.
 */
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     =	cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
};
4566
/* Handle a fatal hardware error: clear the SGE global-enable bit to stop
 * DMA activity, mask all interrupts and log an alert.  The adapter is left
 * stopped; recovery requires a reset.
 */
void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
4573
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304574/* Return the specified PCI-E Configuration Space register from our Physical
4575 * Function. We try first via a Firmware LDST Command since we prefer to let
4576 * the firmware own all of these registers, but if that fails we go for it
4577 * directly ourselves.
4578 */
4579static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
4580{
4581 struct fw_ldst_cmd ldst_cmd;
4582 u32 val;
4583 int ret;
4584
4585 /* Construct and send the Firmware LDST Command to retrieve the
4586 * specified PCI-E Configuration Space register.
4587 */
4588 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
4589 ldst_cmd.op_to_addrspace =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05304590 htonl(FW_CMD_OP_V(FW_LDST_CMD) |
4591 FW_CMD_REQUEST_F |
4592 FW_CMD_READ_F |
Hariprasad Shenai51678652014-11-21 12:52:02 +05304593 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304594 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
Hariprasad Shenai51678652014-11-21 12:52:02 +05304595 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304596 ldst_cmd.u.pcie.ctrl_to_fn =
Hariprasad Shenai51678652014-11-21 12:52:02 +05304597 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304598 ldst_cmd.u.pcie.r = reg;
4599 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
4600 &ldst_cmd);
4601
4602 /* If the LDST Command suucceeded, exctract the returned register
4603 * value. Otherwise read it directly ourself.
4604 */
4605 if (ret == 0)
4606 val = ntohl(ldst_cmd.u.pcie.data[0]);
4607 else
4608 t4_hw_pci_read_cfg4(adap, reg, &val);
4609
4610 return val;
4611}
4612
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004613static void setup_memwin(struct adapter *adap)
4614{
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304615 u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004616
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05304617 if (is_t4(adap->params.chip)) {
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304618 u32 bar0;
4619
4620 /* Truncation intentional: we only read the bottom 32-bits of
4621 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
4622 * mechanism to read BAR0 instead of using
4623 * pci_resource_start() because we could be operating from
4624 * within a Virtual Machine which is trapping our accesses to
4625 * our Configuration Space and we need to set up the PCI-E
4626 * Memory Window decoders with the actual addresses which will
4627 * be coming across the PCI-E link.
4628 */
4629 bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
4630 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
4631 adap->t4_bar0 = bar0;
4632
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00004633 mem_win0_base = bar0 + MEMWIN0_BASE;
4634 mem_win1_base = bar0 + MEMWIN1_BASE;
4635 mem_win2_base = bar0 + MEMWIN2_BASE;
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304636 mem_win2_aperture = MEMWIN2_APERTURE;
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00004637 } else {
4638 /* For T5, only relative offset inside the PCIe BAR is passed */
4639 mem_win0_base = MEMWIN0_BASE;
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304640 mem_win1_base = MEMWIN1_BASE;
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00004641 mem_win2_base = MEMWIN2_BASE_T5;
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304642 mem_win2_aperture = MEMWIN2_APERTURE_T5;
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00004643 }
Hariprasad Shenaif061de422015-01-05 16:30:44 +05304644 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
4645 mem_win0_base | BIR_V(0) |
4646 WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
4647 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
4648 mem_win1_base | BIR_V(0) |
4649 WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
4650 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
4651 mem_win2_base | BIR_V(0) |
4652 WINDOW_V(ilog2(mem_win2_aperture) - 10));
4653 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
Vipul Pandya636f9d32012-09-26 02:39:39 +00004654}
4655
/* Program memory window 3 for RDMA on-chip queue (OCQ) access, sized to the
 * OCQ region rounded up to a power of two.  A no-op when no OCQ memory was
 * allocated.
 */
static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		/* window base = BAR2 bus address + OCQ offset */
		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		/* read back — presumably to post the writes; confirm */
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}
4676
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00004677static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4678{
4679 u32 v;
4680 int ret;
4681
4682 /* get device capabilities */
4683 memset(c, 0, sizeof(*c));
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05304684 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4685 FW_CMD_REQUEST_F | FW_CMD_READ_F);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05304686 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004687 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00004688 if (ret < 0)
4689 return ret;
4690
4691 /* select capabilities we'll be using */
4692 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4693 if (!vf_acls)
4694 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4695 else
4696 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4697 } else if (vf_acls) {
4698 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4699 return ret;
4700 }
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05304701 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4702 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004703 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00004704 if (ret < 0)
4705 return ret;
4706
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004707 ret = t4_config_glbl_rss(adap, adap->fn,
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00004708 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
Hariprasad Shenaib2e1a3f2014-11-21 12:52:05 +05304709 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
4710 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00004711 if (ret < 0)
4712 return ret;
4713
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004714 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4715 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00004716 if (ret < 0)
4717 return ret;
4718
4719 t4_sge_init(adap);
4720
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00004721 /* tweak some settings */
Hariprasad Shenai837e4a42015-01-05 16:30:46 +05304722 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
Hariprasad Shenai0d804332015-01-05 16:30:47 +05304723 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
Hariprasad Shenai837e4a42015-01-05 16:30:46 +05304724 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
4725 v = t4_read_reg(adap, TP_PIO_DATA_A);
4726 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004727
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004728 /* first 4 Tx modulation queues point to consecutive Tx channels */
4729 adap->params.tp.tx_modq_map = 0xE4;
Hariprasad Shenai0d804332015-01-05 16:30:47 +05304730 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
4731 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004732
4733 /* associate each Tx modulation queue with consecutive Tx channels */
4734 v = 0x84218421;
Hariprasad Shenai837e4a42015-01-05 16:30:46 +05304735 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
Hariprasad Shenai0d804332015-01-05 16:30:47 +05304736 &v, 1, TP_TX_SCHED_HDR_A);
Hariprasad Shenai837e4a42015-01-05 16:30:46 +05304737 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
Hariprasad Shenai0d804332015-01-05 16:30:47 +05304738 &v, 1, TP_TX_SCHED_FIFO_A);
Hariprasad Shenai837e4a42015-01-05 16:30:46 +05304739 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
Hariprasad Shenai0d804332015-01-05 16:30:47 +05304740 &v, 1, TP_TX_SCHED_PCMD_A);
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004741
4742#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4743 if (is_offload(adap)) {
Hariprasad Shenai0d804332015-01-05 16:30:47 +05304744 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
4745 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4746 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4747 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4748 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4749 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
4750 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4751 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4752 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4753 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004754 }
4755
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004756 /* get basic stuff going */
4757 return t4_early_init(adap, adap->fn);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00004758}
4759
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004760/*
4761 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4762 */
4763#define MAX_ATIDS 8192U
4764
4765/*
4766 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
Vipul Pandya636f9d32012-09-26 02:39:39 +00004767 *
4768 * If the firmware we're dealing with has Configuration File support, then
4769 * we use that to perform all configuration
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004770 */
Vipul Pandya636f9d32012-09-26 02:39:39 +00004771
4772/*
4773 * Tweak configuration based on module parameters, etc. Most of these have
4774 * defaults assigned to them by Firmware Configuration Files (if we're using
4775 * them) but need to be explicitly set if we're using hard-coded
4776 * initialization. But even in the case of using Firmware Configuration
4777 * Files, we'd like to expose the ability to change these via module
4778 * parameters so these are essentially common tweaks/settings for
4779 * Configuration Files and hard-coded initialization ...
4780 */
/* Apply host- and module-parameter-dependent settings that are common to
 * both Configuration-File and hard-coded initialization.  Always returns 0.
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 * Only 0 and 2 are legal Rx DMA offsets; anything else is coerced
	 * to the default of 2 with a warning.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}
4812
4813/*
4814 * Attempt to initialize the adapter via a Firmware Configuration File.
4815 */
4816static int adap_init0_config(struct adapter *adapter, int reset)
4817{
4818 struct fw_caps_config_cmd caps_cmd;
4819 const struct firmware *cf;
4820 unsigned long mtype = 0, maddr = 0;
4821 u32 finiver, finicsum, cfcsum;
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304822 int ret;
4823 int config_issued = 0;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00004824 char *fw_config_file, fw_config_file_path[256];
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304825 char *config_name = NULL;
Vipul Pandya636f9d32012-09-26 02:39:39 +00004826
4827 /*
4828 * Reset device if necessary.
4829 */
4830 if (reset) {
4831 ret = t4_fw_reset(adapter, adapter->mbox,
Hariprasad Shenai0d804332015-01-05 16:30:47 +05304832 PIORSTMODE_F | PIORST_F);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004833 if (ret < 0)
4834 goto bye;
4835 }
4836
4837 /*
4838 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4839 * then use that. Otherwise, use the configuration file stored
4840 * in the adapter flash ...
4841 */
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05304842 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00004843 case CHELSIO_T4:
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304844 fw_config_file = FW4_CFNAME;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00004845 break;
4846 case CHELSIO_T5:
4847 fw_config_file = FW5_CFNAME;
4848 break;
4849 default:
4850 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4851 adapter->pdev->device);
4852 ret = -EINVAL;
4853 goto bye;
4854 }
4855
4856 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004857 if (ret < 0) {
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304858 config_name = "On FLASH";
Vipul Pandya636f9d32012-09-26 02:39:39 +00004859 mtype = FW_MEMTYPE_CF_FLASH;
4860 maddr = t4_flash_cfg_addr(adapter);
4861 } else {
4862 u32 params[7], val[7];
4863
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304864 sprintf(fw_config_file_path,
4865 "/lib/firmware/%s", fw_config_file);
4866 config_name = fw_config_file_path;
4867
Vipul Pandya636f9d32012-09-26 02:39:39 +00004868 if (cf->size >= FLASH_CFG_MAX_SIZE)
4869 ret = -ENOMEM;
4870 else {
Hariprasad Shenai51678652014-11-21 12:52:02 +05304871 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4872 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
Vipul Pandya636f9d32012-09-26 02:39:39 +00004873 ret = t4_query_params(adapter, adapter->mbox,
4874 adapter->fn, 0, 1, params, val);
4875 if (ret == 0) {
4876 /*
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05304877 * For t4_memory_rw() below addresses and
Vipul Pandya636f9d32012-09-26 02:39:39 +00004878 * sizes have to be in terms of multiples of 4
4879 * bytes. So, if the Configuration File isn't
4880 * a multiple of 4 bytes in length we'll have
4881 * to write that out separately since we can't
4882 * guarantee that the bytes following the
4883 * residual byte in the buffer returned by
4884 * request_firmware() are zeroed out ...
4885 */
4886 size_t resid = cf->size & 0x3;
4887 size_t size = cf->size & ~0x3;
4888 __be32 *data = (__be32 *)cf->data;
4889
Hariprasad Shenai51678652014-11-21 12:52:02 +05304890 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
4891 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
Vipul Pandya636f9d32012-09-26 02:39:39 +00004892
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05304893 spin_lock(&adapter->win0_lock);
4894 ret = t4_memory_rw(adapter, 0, mtype, maddr,
4895 size, data, T4_MEMORY_WRITE);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004896 if (ret == 0 && resid != 0) {
4897 union {
4898 __be32 word;
4899 char buf[4];
4900 } last;
4901 int i;
4902
4903 last.word = data[size >> 2];
4904 for (i = resid; i < 4; i++)
4905 last.buf[i] = 0;
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05304906 ret = t4_memory_rw(adapter, 0, mtype,
4907 maddr + size,
4908 4, &last.word,
4909 T4_MEMORY_WRITE);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004910 }
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05304911 spin_unlock(&adapter->win0_lock);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004912 }
4913 }
4914
4915 release_firmware(cf);
4916 if (ret)
4917 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004918 }
4919
Vipul Pandya636f9d32012-09-26 02:39:39 +00004920 /*
4921 * Issue a Capability Configuration command to the firmware to get it
4922 * to parse the Configuration File. We don't use t4_fw_config_file()
4923 * because we want the ability to modify various features after we've
4924 * processed the configuration file ...
4925 */
4926 memset(&caps_cmd, 0, sizeof(caps_cmd));
4927 caps_cmd.op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05304928 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4929 FW_CMD_REQUEST_F |
4930 FW_CMD_READ_F);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05304931 caps_cmd.cfvalid_to_len16 =
Hariprasad Shenai51678652014-11-21 12:52:02 +05304932 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
4933 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
4934 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
Vipul Pandya636f9d32012-09-26 02:39:39 +00004935 FW_LEN16(caps_cmd));
4936 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4937 &caps_cmd);
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304938
4939 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4940 * Configuration File in FLASH), our last gasp effort is to use the
4941 * Firmware Configuration File which is embedded in the firmware. A
4942 * very few early versions of the firmware didn't have one embedded
4943 * but we can ignore those.
4944 */
4945 if (ret == -ENOENT) {
4946 memset(&caps_cmd, 0, sizeof(caps_cmd));
4947 caps_cmd.op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05304948 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4949 FW_CMD_REQUEST_F |
4950 FW_CMD_READ_F);
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304951 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4952 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4953 sizeof(caps_cmd), &caps_cmd);
4954 config_name = "Firmware Default";
4955 }
4956
4957 config_issued = 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004958 if (ret < 0)
4959 goto bye;
4960
Vipul Pandya636f9d32012-09-26 02:39:39 +00004961 finiver = ntohl(caps_cmd.finiver);
4962 finicsum = ntohl(caps_cmd.finicsum);
4963 cfcsum = ntohl(caps_cmd.cfcsum);
4964 if (finicsum != cfcsum)
4965 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4966 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4967 finicsum, cfcsum);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004968
Vipul Pandya636f9d32012-09-26 02:39:39 +00004969 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00004970 * And now tell the firmware to use the configuration we just loaded.
4971 */
4972 caps_cmd.op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05304973 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4974 FW_CMD_REQUEST_F |
4975 FW_CMD_WRITE_F);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05304976 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
Vipul Pandya636f9d32012-09-26 02:39:39 +00004977 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4978 NULL);
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00004979 if (ret < 0)
4980 goto bye;
4981
Vipul Pandya636f9d32012-09-26 02:39:39 +00004982 /*
4983 * Tweak configuration based on system architecture, module
4984 * parameters, etc.
4985 */
4986 ret = adap_init0_tweaks(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004987 if (ret < 0)
4988 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004989
Vipul Pandya636f9d32012-09-26 02:39:39 +00004990 /*
4991 * And finally tell the firmware to initialize itself using the
4992 * parameters from the Configuration File.
4993 */
4994 ret = t4_fw_initialize(adapter, adapter->mbox);
4995 if (ret < 0)
4996 goto bye;
4997
Hariprasad Shenai06640312015-01-13 15:19:25 +05304998 /* Emit Firmware Configuration File information and return
4999 * successfully.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005000 */
Vipul Pandya636f9d32012-09-26 02:39:39 +00005001 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305002 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5003 config_name, finiver, cfcsum);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005004 return 0;
5005
5006 /*
5007 * Something bad happened. Return the error ... (If the "error"
5008 * is that there's no Configuration File on the adapter we don't
5009 * want to issue a warning since this is fairly common.)
5010 */
5011bye:
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305012 if (config_issued && ret != -ENOENT)
5013 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5014 config_name, -ret);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005015 return ret;
5016}
5017
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305018static struct fw_info fw_info_array[] = {
5019 {
5020 .chip = CHELSIO_T4,
5021 .fs_name = FW4_CFNAME,
5022 .fw_mod_name = FW4_FNAME,
5023 .fw_hdr = {
5024 .chip = FW_HDR_CHIP_T4,
5025 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5026 .intfver_nic = FW_INTFVER(T4, NIC),
5027 .intfver_vnic = FW_INTFVER(T4, VNIC),
5028 .intfver_ri = FW_INTFVER(T4, RI),
5029 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5030 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5031 },
5032 }, {
5033 .chip = CHELSIO_T5,
5034 .fs_name = FW5_CFNAME,
5035 .fw_mod_name = FW5_FNAME,
5036 .fw_hdr = {
5037 .chip = FW_HDR_CHIP_T5,
5038 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5039 .intfver_nic = FW_INTFVER(T5, NIC),
5040 .intfver_vnic = FW_INTFVER(T5, VNIC),
5041 .intfver_ri = FW_INTFVER(T5, RI),
5042 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5043 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5044 },
5045 }
5046};
5047
5048static struct fw_info *find_fw_info(int chip)
5049{
5050 int i;
5051
5052 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5053 if (fw_info_array[i].chip == chip)
5054 return &fw_info_array[i];
5055 }
5056 return NULL;
5057}
5058
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005059/*
Vipul Pandya636f9d32012-09-26 02:39:39 +00005060 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005061 */
5062static int adap_init0(struct adapter *adap)
5063{
5064 int ret;
5065 u32 v, port_vec;
5066 enum dev_state state;
5067 u32 params[7], val[7];
Vipul Pandya9a4da2c2012-10-19 02:09:53 +00005068 struct fw_caps_config_cmd caps_cmd;
Hariprasad Shenai49aa2842015-01-07 08:48:00 +05305069 struct fw_devlog_cmd devlog_cmd;
5070 u32 devlog_meminfo;
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05305071 int reset = 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005072
Hariprasad Shenai666224d2014-12-11 11:11:43 +05305073 /* Contact FW, advertising Master capability */
5074 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005075 if (ret < 0) {
5076 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5077 ret);
5078 return ret;
5079 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005080 if (ret == adap->mbox)
5081 adap->flags |= MASTER_PF;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005082
Vipul Pandya636f9d32012-09-26 02:39:39 +00005083 /*
5084 * If we're the Master PF Driver and the device is uninitialized,
5085 * then let's consider upgrading the firmware ... (We always want
5086 * to check the firmware version number in order to A. get it for
5087 * later reporting and B. to warn if the currently loaded firmware
5088 * is excessively mismatched relative to the driver.)
5089 */
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305090 t4_get_fw_version(adap, &adap->params.fw_vers);
5091 t4_get_tp_version(adap, &adap->params.tp_vers);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005092 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305093 struct fw_info *fw_info;
5094 struct fw_hdr *card_fw;
5095 const struct firmware *fw;
5096 const u8 *fw_data = NULL;
5097 unsigned int fw_size = 0;
5098
5099 /* This is the firmware whose headers the driver was compiled
5100 * against
5101 */
5102 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5103 if (fw_info == NULL) {
5104 dev_err(adap->pdev_dev,
5105 "unable to get firmware info for chip %d.\n",
5106 CHELSIO_CHIP_VERSION(adap->params.chip));
5107 return -EINVAL;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005108 }
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305109
5110 /* allocate memory to read the header of the firmware on the
5111 * card
5112 */
5113 card_fw = t4_alloc_mem(sizeof(*card_fw));
5114
5115 /* Get FW from from /lib/firmware/ */
5116 ret = request_firmware(&fw, fw_info->fw_mod_name,
5117 adap->pdev_dev);
5118 if (ret < 0) {
5119 dev_err(adap->pdev_dev,
5120 "unable to load firmware image %s, error %d\n",
5121 fw_info->fw_mod_name, ret);
5122 } else {
5123 fw_data = fw->data;
5124 fw_size = fw->size;
5125 }
5126
5127 /* upgrade FW logic */
5128 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5129 state, &reset);
5130
5131 /* Cleaning up */
5132 if (fw != NULL)
5133 release_firmware(fw);
5134 t4_free_mem(card_fw);
5135
Vipul Pandya636f9d32012-09-26 02:39:39 +00005136 if (ret < 0)
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305137 goto bye;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005138 }
5139
5140 /*
5141 * Grab VPD parameters. This should be done after we establish a
5142 * connection to the firmware since some of the VPD parameters
5143 * (notably the Core Clock frequency) are retrieved via requests to
5144 * the firmware. On the other hand, we need these fairly early on
5145 * so we do this right after getting ahold of the firmware.
5146 */
5147 ret = get_vpd_params(adap, &adap->params.vpd);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005148 if (ret < 0)
5149 goto bye;
5150
Hariprasad Shenai49aa2842015-01-07 08:48:00 +05305151 /* Read firmware device log parameters. We really need to find a way
5152 * to get these parameters initialized with some default values (which
5153 * are likely to be correct) for the case where we either don't
5154 * attache to the firmware or it's crashed when we probe the adapter.
5155 * That way we'll still be able to perform early firmware startup
5156 * debugging ... If the request to get the Firmware's Device Log
5157 * parameters fails, we'll live so we don't make that a fatal error.
5158 */
5159 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
5160 devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
5161 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5162 devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
5163 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
5164 &devlog_cmd);
5165 if (ret == 0) {
5166 devlog_meminfo =
5167 ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
5168 adap->params.devlog.memtype =
5169 FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
5170 adap->params.devlog.start =
5171 FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
5172 adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
5173 }
5174
Vipul Pandya636f9d32012-09-26 02:39:39 +00005175 /*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005176 * Find out what ports are available to us. Note that we need to do
5177 * this before calling adap_init0_no_config() since it needs nports
5178 * and portvec ...
Vipul Pandya636f9d32012-09-26 02:39:39 +00005179 */
5180 v =
Hariprasad Shenai51678652014-11-21 12:52:02 +05305181 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5182 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005183 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5184 if (ret < 0)
5185 goto bye;
5186
5187 adap->params.nports = hweight32(port_vec);
5188 adap->params.portvec = port_vec;
5189
Hariprasad Shenai06640312015-01-13 15:19:25 +05305190 /* If the firmware is initialized already, emit a simply note to that
5191 * effect. Otherwise, it's time to try initializing the adapter.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005192 */
5193 if (state == DEV_STATE_INIT) {
5194 dev_info(adap->pdev_dev, "Coming up as %s: "\
5195 "Adapter already initialized\n",
5196 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
Vipul Pandya636f9d32012-09-26 02:39:39 +00005197 } else {
5198 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5199 "Initializing adapter\n");
Hariprasad Shenai06640312015-01-13 15:19:25 +05305200
5201 /* Find out whether we're dealing with a version of the
5202 * firmware which has configuration file support.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005203 */
Hariprasad Shenai06640312015-01-13 15:19:25 +05305204 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5205 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
5206 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5207 params, val);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005208
Hariprasad Shenai06640312015-01-13 15:19:25 +05305209 /* If the firmware doesn't support Configuration Files,
5210 * return an error.
5211 */
5212 if (ret < 0) {
5213 dev_err(adap->pdev_dev, "firmware doesn't support "
5214 "Firmware Configuration Files\n");
5215 goto bye;
5216 }
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005217
Hariprasad Shenai06640312015-01-13 15:19:25 +05305218 /* The firmware provides us with a memory buffer where we can
5219 * load a Configuration File from the host if we want to
5220 * override the Configuration File in flash.
5221 */
5222 ret = adap_init0_config(adap, reset);
5223 if (ret == -ENOENT) {
5224 dev_err(adap->pdev_dev, "no Configuration File "
5225 "present on adapter.\n");
5226 goto bye;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005227 }
5228 if (ret < 0) {
Hariprasad Shenai06640312015-01-13 15:19:25 +05305229 dev_err(adap->pdev_dev, "could not initialize "
5230 "adapter, error %d\n", -ret);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005231 goto bye;
5232 }
5233 }
5234
Hariprasad Shenai06640312015-01-13 15:19:25 +05305235 /* Give the SGE code a chance to pull in anything that it needs ...
5236 * Note that this must be called after we retrieve our VPD parameters
5237 * in order to know how to convert core ticks to seconds, etc.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005238 */
Hariprasad Shenai06640312015-01-13 15:19:25 +05305239 ret = t4_sge_init(adap);
5240 if (ret < 0)
5241 goto bye;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005242
Vipul Pandya9a4da2c2012-10-19 02:09:53 +00005243 if (is_bypass_device(adap->pdev->device))
5244 adap->params.bypass = 1;
5245
Vipul Pandya636f9d32012-09-26 02:39:39 +00005246 /*
5247 * Grab some of our basic fundamental operating parameters.
5248 */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005249#define FW_PARAM_DEV(param) \
Hariprasad Shenai51678652014-11-21 12:52:02 +05305250 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
5251 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005252
5253#define FW_PARAM_PFVF(param) \
Hariprasad Shenai51678652014-11-21 12:52:02 +05305254 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
5255 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
5256 FW_PARAMS_PARAM_Y_V(0) | \
5257 FW_PARAMS_PARAM_Z_V(0)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005258
Vipul Pandya636f9d32012-09-26 02:39:39 +00005259 params[0] = FW_PARAM_PFVF(EQ_START);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005260 params[1] = FW_PARAM_PFVF(L2T_START);
5261 params[2] = FW_PARAM_PFVF(L2T_END);
5262 params[3] = FW_PARAM_PFVF(FILTER_START);
5263 params[4] = FW_PARAM_PFVF(FILTER_END);
5264 params[5] = FW_PARAM_PFVF(IQFLINT_START);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005265 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005266 if (ret < 0)
5267 goto bye;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005268 adap->sge.egr_start = val[0];
5269 adap->l2t_start = val[1];
5270 adap->l2t_end = val[2];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005271 adap->tids.ftid_base = val[3];
5272 adap->tids.nftids = val[4] - val[3] + 1;
5273 adap->sge.ingr_start = val[5];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005274
Anish Bhattb5a02f52015-01-14 15:17:34 -08005275 params[0] = FW_PARAM_PFVF(CLIP_START);
5276 params[1] = FW_PARAM_PFVF(CLIP_END);
5277 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5278 if (ret < 0)
5279 goto bye;
5280 adap->clipt_start = val[0];
5281 adap->clipt_end = val[1];
5282
Vipul Pandya636f9d32012-09-26 02:39:39 +00005283 /* query params related to active filter region */
5284 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5285 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5286 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5287 /* If Active filter size is set we enable establishing
5288 * offload connection through firmware work request
5289 */
5290 if ((val[0] != val[1]) && (ret >= 0)) {
5291 adap->flags |= FW_OFLD_CONN;
5292 adap->tids.aftid_base = val[0];
5293 adap->tids.aftid_end = val[1];
5294 }
5295
Vipul Pandyab407a4a2013-04-29 04:04:40 +00005296 /* If we're running on newer firmware, let it know that we're
5297 * prepared to deal with encapsulated CPL messages. Older
5298 * firmware won't understand this and we'll just get
5299 * unencapsulated messages ...
5300 */
5301 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5302 val[0] = 1;
5303 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5304
Vipul Pandya636f9d32012-09-26 02:39:39 +00005305 /*
Kumar Sanghvi1ac0f092014-02-18 17:56:12 +05305306 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5307 * capability. Earlier versions of the firmware didn't have the
5308 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5309 * permission to use ULPTX MEMWRITE DSGL.
5310 */
5311 if (is_t4(adap->params.chip)) {
5312 adap->params.ulptx_memwrite_dsgl = false;
5313 } else {
5314 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5315 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5316 1, params, val);
5317 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5318 }
5319
5320 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00005321 * Get device capabilities so we can determine what resources we need
5322 * to manage.
5323 */
5324 memset(&caps_cmd, 0, sizeof(caps_cmd));
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05305325 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5326 FW_CMD_REQUEST_F | FW_CMD_READ_F);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05305327 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
Vipul Pandya636f9d32012-09-26 02:39:39 +00005328 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5329 &caps_cmd);
5330 if (ret < 0)
5331 goto bye;
5332
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005333 if (caps_cmd.ofldcaps) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005334 /* query offload-related parameters */
5335 params[0] = FW_PARAM_DEV(NTID);
5336 params[1] = FW_PARAM_PFVF(SERVER_START);
5337 params[2] = FW_PARAM_PFVF(SERVER_END);
5338 params[3] = FW_PARAM_PFVF(TDDP_START);
5339 params[4] = FW_PARAM_PFVF(TDDP_END);
5340 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005341 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5342 params, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005343 if (ret < 0)
5344 goto bye;
5345 adap->tids.ntids = val[0];
5346 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5347 adap->tids.stid_base = val[1];
5348 adap->tids.nstids = val[2] - val[1] + 1;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005349 /*
5350 * Setup server filter region. Divide the availble filter
5351 * region into two parts. Regular filters get 1/3rd and server
5352 * filters get 2/3rd part. This is only enabled if workarond
5353 * path is enabled.
5354 * 1. For regular filters.
5355 * 2. Server filter: This are special filters which are used
5356 * to redirect SYN packets to offload queue.
5357 */
5358 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5359 adap->tids.sftid_base = adap->tids.ftid_base +
5360 DIV_ROUND_UP(adap->tids.nftids, 3);
5361 adap->tids.nsftids = adap->tids.nftids -
5362 DIV_ROUND_UP(adap->tids.nftids, 3);
5363 adap->tids.nftids = adap->tids.sftid_base -
5364 adap->tids.ftid_base;
5365 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005366 adap->vres.ddp.start = val[3];
5367 adap->vres.ddp.size = val[4] - val[3] + 1;
5368 adap->params.ofldq_wr_cred = val[5];
Vipul Pandya636f9d32012-09-26 02:39:39 +00005369
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005370 adap->params.offload = 1;
5371 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005372 if (caps_cmd.rdmacaps) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005373 params[0] = FW_PARAM_PFVF(STAG_START);
5374 params[1] = FW_PARAM_PFVF(STAG_END);
5375 params[2] = FW_PARAM_PFVF(RQ_START);
5376 params[3] = FW_PARAM_PFVF(RQ_END);
5377 params[4] = FW_PARAM_PFVF(PBL_START);
5378 params[5] = FW_PARAM_PFVF(PBL_END);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005379 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5380 params, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005381 if (ret < 0)
5382 goto bye;
5383 adap->vres.stag.start = val[0];
5384 adap->vres.stag.size = val[1] - val[0] + 1;
5385 adap->vres.rq.start = val[2];
5386 adap->vres.rq.size = val[3] - val[2] + 1;
5387 adap->vres.pbl.start = val[4];
5388 adap->vres.pbl.size = val[5] - val[4] + 1;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00005389
5390 params[0] = FW_PARAM_PFVF(SQRQ_START);
5391 params[1] = FW_PARAM_PFVF(SQRQ_END);
5392 params[2] = FW_PARAM_PFVF(CQ_START);
5393 params[3] = FW_PARAM_PFVF(CQ_END);
Dimitris Michailidis1ae970e2010-08-02 13:19:19 +00005394 params[4] = FW_PARAM_PFVF(OCQ_START);
5395 params[5] = FW_PARAM_PFVF(OCQ_END);
Hariprasad Shenai5c937dd2014-09-01 19:55:00 +05305396 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
5397 val);
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00005398 if (ret < 0)
5399 goto bye;
5400 adap->vres.qp.start = val[0];
5401 adap->vres.qp.size = val[1] - val[0] + 1;
5402 adap->vres.cq.start = val[2];
5403 adap->vres.cq.size = val[3] - val[2] + 1;
Dimitris Michailidis1ae970e2010-08-02 13:19:19 +00005404 adap->vres.ocq.start = val[4];
5405 adap->vres.ocq.size = val[5] - val[4] + 1;
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05305406
5407 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5408 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
Hariprasad Shenai5c937dd2014-09-01 19:55:00 +05305409 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
5410 val);
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05305411 if (ret < 0) {
5412 adap->params.max_ordird_qp = 8;
5413 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5414 ret = 0;
5415 } else {
5416 adap->params.max_ordird_qp = val[0];
5417 adap->params.max_ird_adapter = val[1];
5418 }
5419 dev_info(adap->pdev_dev,
5420 "max_ordird_qp %d max_ird_adapter %d\n",
5421 adap->params.max_ordird_qp,
5422 adap->params.max_ird_adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005423 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005424 if (caps_cmd.iscsicaps) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005425 params[0] = FW_PARAM_PFVF(ISCSI_START);
5426 params[1] = FW_PARAM_PFVF(ISCSI_END);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005427 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5428 params, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005429 if (ret < 0)
5430 goto bye;
5431 adap->vres.iscsi.start = val[0];
5432 adap->vres.iscsi.size = val[1] - val[0] + 1;
5433 }
5434#undef FW_PARAM_PFVF
5435#undef FW_PARAM_DEV
5436
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05305437 /* The MTU/MSS Table is initialized by now, so load their values. If
5438 * we're initializing the adapter, then we'll make any modifications
5439 * we want to the MTU/MSS Table and also initialize the congestion
5440 * parameters.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005441 */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005442 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05305443 if (state != DEV_STATE_INIT) {
5444 int i;
Casey Leedom7ee9ff92010-06-25 12:11:46 +00005445
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05305446 /* The default MTU Table contains values 1492 and 1500.
5447 * However, for TCP, it's better to have two values which are
5448 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5449 * This allows us to have a TCP Data Payload which is a
5450 * multiple of 8 regardless of what combination of TCP Options
5451 * are in use (always a multiple of 4 bytes) which is
5452 * important for performance reasons. For instance, if no
5453 * options are in use, then we have a 20-byte IP header and a
5454 * 20-byte TCP header. In this case, a 1500-byte MSS would
5455 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5456 * which is not a multiple of 8. So using an MSS of 1488 in
5457 * this case results in a TCP Data Payload of 1448 bytes which
5458 * is a multiple of 8. On the other hand, if 12-byte TCP Time
5459 * Stamps have been negotiated, then an MTU of 1500 bytes
5460 * results in a TCP Data Payload of 1448 bytes which, as
5461 * above, is a multiple of 8 bytes ...
5462 */
5463 for (i = 0; i < NMTUS; i++)
5464 if (adap->params.mtus[i] == 1492) {
5465 adap->params.mtus[i] = 1488;
5466 break;
5467 }
5468
5469 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5470 adap->params.b_wnd);
5471 }
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05305472 t4_init_sge_params(adap);
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05305473 t4_init_tp_params(adap);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005474 adap->flags |= FW_OK;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005475 return 0;
5476
5477 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00005478 * Something bad happened. If a command timed out or failed with EIO
5479 * FW does not operate within its spec or something catastrophic
5480 * happened to HW/FW, stop issuing commands.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005481 */
Vipul Pandya636f9d32012-09-26 02:39:39 +00005482bye:
5483 if (ret != -ETIMEDOUT && ret != -EIO)
5484 t4_fw_bye(adap, adap->mbox);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005485 return ret;
5486}
5487
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005488/* EEH callbacks */
5489
5490static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5491 pci_channel_state_t state)
5492{
5493 int i;
5494 struct adapter *adap = pci_get_drvdata(pdev);
5495
5496 if (!adap)
5497 goto out;
5498
5499 rtnl_lock();
5500 adap->flags &= ~FW_OK;
5501 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
Gavin Shan9fe6cb52014-01-23 12:27:35 +08005502 spin_lock(&adap->stats_lock);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005503 for_each_port(adap, i) {
5504 struct net_device *dev = adap->port[i];
5505
5506 netif_device_detach(dev);
5507 netif_carrier_off(dev);
5508 }
Gavin Shan9fe6cb52014-01-23 12:27:35 +08005509 spin_unlock(&adap->stats_lock);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005510 if (adap->flags & FULL_INIT_DONE)
5511 cxgb_down(adap);
5512 rtnl_unlock();
Gavin Shan144be3d2014-01-23 12:27:34 +08005513 if ((adap->flags & DEV_ENABLED)) {
5514 pci_disable_device(pdev);
5515 adap->flags &= ~DEV_ENABLED;
5516 }
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005517out: return state == pci_channel_io_perm_failure ?
5518 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5519}
5520
/* PCI error-recovery "slot reset" hook: bring the adapter back up.
 *
 * Re-enables the PCI device, restores config space, re-establishes the
 * firmware connection (forcing mastership with MASTER_MUST), redoes basic
 * initialization and re-allocates a Virtual Interface for each port.
 * Any failure returns PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	/* No adapter attached: just restore config space and report OK. */
	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	/* eeh_err_detected() disabled the device; re-enable it here. */
	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					    "device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	/* Wait for the chip to respond before talking to the firmware. */
	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		/* t4_alloc_vi() returns the new VI id on success. */
		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
5573
5574static void eeh_resume(struct pci_dev *pdev)
5575{
5576 int i;
5577 struct adapter *adap = pci_get_drvdata(pdev);
5578
5579 if (!adap)
5580 return;
5581
5582 rtnl_lock();
5583 for_each_port(adap, i) {
5584 struct net_device *dev = adap->port[i];
5585
5586 if (netif_running(dev)) {
5587 link_start(dev);
5588 cxgb_set_rxmode(dev);
5589 }
5590 netif_device_attach(dev);
5591 }
5592 rtnl_unlock();
5593}
5594
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07005595static const struct pci_error_handlers cxgb4_eeh = {
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005596 .error_detected = eeh_err_detected,
5597 .slot_reset = eeh_slot_reset,
5598 .resume = eeh_resume,
5599};
5600
Kumar Sanghvi57d8b762014-02-18 17:56:10 +05305601static inline bool is_x_10g_port(const struct link_config *lc)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005602{
Kumar Sanghvi57d8b762014-02-18 17:56:10 +05305603 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
5604 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005605}
5606
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05305607static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
5608 unsigned int us, unsigned int cnt,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005609 unsigned int size, unsigned int iqe_size)
5610{
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05305611 q->adap = adap;
5612 set_rspq_intr_params(q, us, cnt);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005613 q->iqe_len = iqe_size;
5614 q->size = size;
5615}
5616
5617/*
5618 * Perform default configuration of DMA queues depending on the number and type
5619 * of ports we found and the number of available CPUs. Most settings can be
5620 * modified by the admin prior to actual use.
5621 */
Bill Pemberton91744942012-12-03 09:23:02 -05005622static void cfg_queues(struct adapter *adap)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005623{
5624 struct sge *s = &adap->sge;
Anish Bhatt688848b2014-06-19 21:37:13 -07005625 int i, n10g = 0, qidx = 0;
5626#ifndef CONFIG_CHELSIO_T4_DCB
5627 int q10g = 0;
5628#endif
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05305629 int ciq_size;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005630
5631 for_each_port(adap, i)
Kumar Sanghvi57d8b762014-02-18 17:56:10 +05305632 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
Anish Bhatt688848b2014-06-19 21:37:13 -07005633#ifdef CONFIG_CHELSIO_T4_DCB
5634 /* For Data Center Bridging support we need to be able to support up
5635 * to 8 Traffic Priorities; each of which will be assigned to its
5636 * own TX Queue in order to prevent Head-Of-Line Blocking.
5637 */
5638 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
5639 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
5640 MAX_ETH_QSETS, adap->params.nports * 8);
5641 BUG_ON(1);
5642 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005643
Anish Bhatt688848b2014-06-19 21:37:13 -07005644 for_each_port(adap, i) {
5645 struct port_info *pi = adap2pinfo(adap, i);
5646
5647 pi->first_qset = qidx;
5648 pi->nqsets = 8;
5649 qidx += pi->nqsets;
5650 }
5651#else /* !CONFIG_CHELSIO_T4_DCB */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005652 /*
5653 * We default to 1 queue per non-10G port and up to # of cores queues
5654 * per 10G port.
5655 */
5656 if (n10g)
5657 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
Yuval Mintz5952dde2012-07-01 03:18:55 +00005658 if (q10g > netif_get_num_default_rss_queues())
5659 q10g = netif_get_num_default_rss_queues();
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005660
5661 for_each_port(adap, i) {
5662 struct port_info *pi = adap2pinfo(adap, i);
5663
5664 pi->first_qset = qidx;
Kumar Sanghvi57d8b762014-02-18 17:56:10 +05305665 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005666 qidx += pi->nqsets;
5667 }
Anish Bhatt688848b2014-06-19 21:37:13 -07005668#endif /* !CONFIG_CHELSIO_T4_DCB */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005669
5670 s->ethqsets = qidx;
5671 s->max_ethqsets = qidx; /* MSI-X may lower it later */
5672
5673 if (is_offload(adap)) {
5674 /*
5675 * For offload we use 1 queue/channel if all ports are up to 1G,
5676 * otherwise we divide all available queues amongst the channels
5677 * capped by the number of available cores.
5678 */
5679 if (n10g) {
5680 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
5681 num_online_cpus());
5682 s->ofldqsets = roundup(i, adap->params.nports);
5683 } else
5684 s->ofldqsets = adap->params.nports;
5685 /* For RDMA one Rx queue per channel suffices */
5686 s->rdmaqs = adap->params.nports;
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05305687 s->rdmaciqs = adap->params.nports;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005688 }
5689
5690 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5691 struct sge_eth_rxq *r = &s->ethrxq[i];
5692
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05305693 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005694 r->fl.size = 72;
5695 }
5696
5697 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5698 s->ethtxq[i].q.size = 1024;
5699
5700 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5701 s->ctrlq[i].q.size = 512;
5702
5703 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
5704 s->ofldtxq[i].q.size = 1024;
5705
5706 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
5707 struct sge_ofld_rxq *r = &s->ofldrxq[i];
5708
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05305709 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005710 r->rspq.uld = CXGB4_ULD_ISCSI;
5711 r->fl.size = 72;
5712 }
5713
5714 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
5715 struct sge_ofld_rxq *r = &s->rdmarxq[i];
5716
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05305717 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005718 r->rspq.uld = CXGB4_ULD_RDMA;
5719 r->fl.size = 72;
5720 }
5721
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05305722 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
5723 if (ciq_size > SGE_MAX_IQ_SIZE) {
5724 CH_WARN(adap, "CIQ size too small for available IQs\n");
5725 ciq_size = SGE_MAX_IQ_SIZE;
5726 }
5727
5728 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
5729 struct sge_ofld_rxq *r = &s->rdmaciq[i];
5730
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05305731 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05305732 r->rspq.uld = CXGB4_ULD_RDMA;
5733 }
5734
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05305735 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5736 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005737}
5738
5739/*
5740 * Reduce the number of Ethernet queues across all ports to at most n.
5741 * n provides at least one queue per port.
5742 */
Bill Pemberton91744942012-12-03 09:23:02 -05005743static void reduce_ethqs(struct adapter *adap, int n)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005744{
5745 int i;
5746 struct port_info *pi;
5747
5748 while (n < adap->sge.ethqsets)
5749 for_each_port(adap, i) {
5750 pi = adap2pinfo(adap, i);
5751 if (pi->nqsets > 1) {
5752 pi->nqsets--;
5753 adap->sge.ethqsets--;
5754 if (adap->sge.ethqsets <= n)
5755 break;
5756 }
5757 }
5758
5759 n = 0;
5760 for_each_port(adap, i) {
5761 pi = adap2pinfo(adap, i);
5762 pi->first_qset = n;
5763 n += pi->nqsets;
5764 }
5765}
5766
5767/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5768#define EXTRA_VECS 2
5769
/*
 * Allocate MSI-X vectors for the adapter's queue groups.
 *
 * Builds an ideal request ("want": one vector per Ethernet/offload/RDMA/
 * RDMA-CIQ queue set plus EXTRA_VECS) and a floor ("need"), then asks the
 * PCI core for anything in that range.  If fewer vectors than "want" are
 * granted, the queue counts in adap->sge are trimmed to fit, with the NIC
 * (Ethernet) queues getting top priority for leftovers.
 *
 * Returns 0 on success or a negative errno from pci_enable_msix_range().
 */
static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	/* Ideal case: every Ethernet queue set plus the FW/non-data
	 * vectors, and (for offload-capable adapters) every offload,
	 * RDMA and RDMA-CIQ queue set gets its own vector.
	 */
	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 3 * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
	/* On success "want" becomes the number of vectors granted,
	 * somewhere in [need, want]. */
	want = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (want < 0)
		return want;

	/*
	 * Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = want - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		/* Whatever remains after Ethernet took its share goes to
		 * the offload queues; add back the nchan reserved above so
		 * only the ULD portion of ofld_need is subtracted.
		 */
		i = want - EXTRA_VECS - s->max_ethqsets;
		i -= ofld_need - nchan;
		s->ofldqsets = (i / nchan) * nchan;	/* round down */
	}
	for (i = 0; i < want; ++i)
		adap->msix_info[i].vec = entries[i].vector;

	return 0;
}
5820
5821#undef EXTRA_VECS
5822
Bill Pemberton91744942012-12-03 09:23:02 -05005823static int init_rss(struct adapter *adap)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00005824{
5825 unsigned int i, j;
5826
5827 for_each_port(adap, i) {
5828 struct port_info *pi = adap2pinfo(adap, i);
5829
5830 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5831 if (!pi->rss)
5832 return -ENOMEM;
5833 for (j = 0; j < pi->rss_size; j++)
Ben Hutchings278bc422011-12-15 13:56:49 +00005834 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
Dimitris Michailidis671b0062010-07-11 12:01:17 +00005835 }
5836 return 0;
5837}
5838
Bill Pemberton91744942012-12-03 09:23:02 -05005839static void print_port_info(const struct net_device *dev)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005840{
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005841 char buf[80];
Dimitris Michailidis118969e2010-12-14 21:36:48 +00005842 char *bufp = buf;
Dimitris Michailidisf1a051b2010-05-10 15:58:08 +00005843 const char *spd = "";
Dimitris Michailidis118969e2010-12-14 21:36:48 +00005844 const struct port_info *pi = netdev_priv(dev);
5845 const struct adapter *adap = pi->adapter;
Dimitris Michailidisf1a051b2010-05-10 15:58:08 +00005846
5847 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
5848 spd = " 2.5 GT/s";
5849 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5850 spd = " 5 GT/s";
Roland Dreierd2e752d2014-04-28 17:36:20 -07005851 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
5852 spd = " 8 GT/s";
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005853
Dimitris Michailidis118969e2010-12-14 21:36:48 +00005854 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
5855 bufp += sprintf(bufp, "100/");
5856 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
5857 bufp += sprintf(bufp, "1000/");
5858 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
5859 bufp += sprintf(bufp, "10G/");
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05305860 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
5861 bufp += sprintf(bufp, "40G/");
Dimitris Michailidis118969e2010-12-14 21:36:48 +00005862 if (bufp != buf)
5863 --bufp;
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05305864 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005865
Dimitris Michailidis118969e2010-12-14 21:36:48 +00005866 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
Santosh Rastapur0a57a532013-03-14 05:08:49 +00005867 adap->params.vpd.id,
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05305868 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
Dimitris Michailidis118969e2010-12-14 21:36:48 +00005869 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
5870 (adap->flags & USING_MSIX) ? " MSI-X" :
5871 (adap->flags & USING_MSI) ? " MSI" : "");
Kumar Sanghvia94cd702014-02-18 17:56:09 +05305872 netdev_info(dev, "S/N: %s, P/N: %s\n",
5873 adap->params.vpd.sn, adap->params.vpd.pn);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005874}
5875
/* Set the Relaxed Ordering Enable bit in the device's PCIe Device Control
 * register so the device may issue relaxed-ordering transactions.
 */
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
5880
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 *
 * Used both from the probe error path and from remove_one(); every step
 * tolerates the corresponding resource never having been allocated.
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	/* Ports may only be partially allocated on an error path, so
	 * check each slot before freeing its RSS table and net device.
	 */
	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	/* Tell the firmware we are done with this function */
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}
5904
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00005905#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
Dimitris Michailidis35d35682010-08-02 13:19:20 +00005906#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005907 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
Santosh Rastapur22adfe02013-03-14 05:08:51 +00005908#define SEGMENT_SIZE 128
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005909
/*
 * PCI probe entry point: bring up one adapter function.
 *
 * Claims BARs, maps registers, verifies this is the PF we drive (other
 * functions short-circuit to the SR-IOV enable at the "sriov" label),
 * configures DMA masks, allocates the adapter structure and its per-port
 * net devices, initializes queues/tables/interrupts and registers the
 * net devices.  Errors unwind through the goto ladder at the bottom in
 * reverse order of acquisition.
 */
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	void __iomem *regs;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	/* Map BAR0 early: the PL_WHOAMI check below needs register access */
	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
	if (func != ent->driver_data) {
		iounmap(regs);
		pci_disable_device(pdev);
		pci_save_state(pdev);        /* to restore SR-IOV later */
		goto sriov;
	}

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	/* 0xff marks "no port" in the Tx-channel -> port map */
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;


	/* On T5+ chips, map BAR2 (write-combined) for doorbells, but only
	 * if the egress-queues-per-page setting leaves room for write
	 * coalescing segments.
	 */
	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->fn);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled only
		 * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
		 * queue is less no of segments that can be accommodated in
		 * a page size.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
		pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* Allocate one net device per port */
	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;	/* no MAC filter claimed yet */
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		netdev->ethtool_ops = &cxgb_ethtool_ops;
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
					  adapter->clipt_end);
	if (!adapter->clipt) {
		/* We tolerate a lack of clip_table, giving up
		 * some functionality
		 */
		dev_warn(&pdev->dev,
			 "could not allocate Clip table, continuing\n");
		adapter->params.offload = 0;
	}
#endif
	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		/* Not even the first port registered: fail the probe */
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		/* Partial success: continue with the ports that registered */
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}
6198
/*
 * PCI remove (and shutdown) entry point: tear down one adapter function
 * in roughly the reverse order of init_one().  Also called for PFs that
 * never attached (adapter == NULL), which only need their regions freed.
 */
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);

#endif

	if (adapter) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_offload(adapter))
			detach_ulds(adapter);

		/* Only ports that completed register_netdev() in init_one()
		 * need unregistering.
		 */
		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		/* DEV_ENABLED guards against double pci_disable_device()
		 * (e.g. after an EEH recovery already disabled it).
		 */
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		/* let any RCU readers of the adapter drain before freeing */
		synchronize_rcu();
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}
6257
/* PCI driver hooks.  remove_one doubles as the shutdown handler so the
 * device is quiesced on reboot/kexec as well as on driver unbind.
 */
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};
6266
6267static int __init cxgb4_init_module(void)
6268{
6269 int ret;
6270
6271 /* Debugfs support is optional, just warn if this fails */
6272 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6273 if (!cxgb4_debugfs_root)
Joe Perches428ac432013-01-06 13:34:49 +00006274 pr_warn("could not create debugfs entry, continuing\n");
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006275
6276 ret = pci_register_driver(&cxgb4_driver);
Anish Bhatt29aaee62014-08-20 13:44:06 -07006277 if (ret < 0)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006278 debugfs_remove(cxgb4_debugfs_root);
Vipul Pandya01bcca62013-07-04 16:10:46 +05306279
Anish Bhatt1bb60372014-10-14 20:07:22 -07006280#if IS_ENABLED(CONFIG_IPV6)
Anish Bhattb5a02f52015-01-14 15:17:34 -08006281 if (!inet6addr_registered) {
6282 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6283 inet6addr_registered = true;
6284 }
Anish Bhatt1bb60372014-10-14 20:07:22 -07006285#endif
Vipul Pandya01bcca62013-07-04 16:10:46 +05306286
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006287 return ret;
6288}
6289
/*
 * Module unload entry point: undo cxgb4_init_module() in reverse order.
 * The inet6addr notifier must go first so no notifier callback can run
 * while adapters are being torn down by pci_unregister_driver().
 */
static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}
6301
6302module_init(cxgb4_init_module);
6303module_exit(cxgb4_cleanup_module);