blob: 082a596a42645e5f4517a2547edf04ff401e1130 [file] [log] [blame]
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
Anish Bhattce100b8b2014-06-19 21:37:15 -07004 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
Jiri Pirko01789342011-08-16 06:29:00 +000044#include <linux/if.h>
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +000045#include <linux/if_vlan.h>
46#include <linux/init.h>
47#include <linux/log2.h>
48#include <linux/mdio.h>
49#include <linux/module.h>
50#include <linux/moduleparam.h>
51#include <linux/mutex.h>
52#include <linux/netdevice.h>
53#include <linux/pci.h>
54#include <linux/aer.h>
55#include <linux/rtnetlink.h>
56#include <linux/sched.h>
57#include <linux/seq_file.h>
58#include <linux/sockios.h>
59#include <linux/vmalloc.h>
60#include <linux/workqueue.h>
61#include <net/neighbour.h>
62#include <net/netevent.h>
Vipul Pandya01bcca62013-07-04 16:10:46 +053063#include <net/addrconf.h>
David S. Miller1ef80192014-11-10 13:27:49 -050064#include <net/bonding.h>
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +000065#include <asm/uaccess.h>
66
67#include "cxgb4.h"
68#include "t4_regs.h"
Hariprasad Shenaif612b812015-01-05 16:30:43 +053069#include "t4_values.h"
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +000070#include "t4_msg.h"
71#include "t4fw_api.h"
Anish Bhatt688848b2014-06-19 21:37:13 -070072#include "cxgb4_dcb.h"
Hariprasad Shenaifd88b312014-11-07 09:35:23 +053073#include "cxgb4_debugfs.h"
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +000074#include "l2t.h"
75
/* Driver version/description strings.  Undefine first in case a shared
 * header (e.g. a ULD header) already defined DRV_VERSION.
 */
#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"
/* Bounds on the sizes of the various SGE queues, in units of queue
 * entries (descriptors), used to validate user-supplied ring sizes.
 */
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES = 128,
	MIN_FL_ENTRIES = 16
};
92
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};
116
/* Default netif message-enable bitmap: driver, probe, link and interface
 * state transitions plus RX/TX error reporting.
 */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
120
Hariprasad Shenai3fedeab2014-11-25 08:33:58 +0530121/* Macros needed to support the PCI Device ID Table ...
122 */
123#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
124 static struct pci_device_id cxgb4_pci_tbl[] = {
125#define CH_PCI_DEVICE_ID_FUNCTION 0x4
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000126
Hariprasad Shenai3fedeab2014-11-25 08:33:58 +0530127/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
128 * called for both.
129 */
130#define CH_PCI_DEVICE_ID_FUNCTION2 0x0
131
132#define CH_PCI_ID_TABLE_ENTRY(devid) \
133 {PCI_VDEVICE(CHELSIO, (devid)), 4}
134
135#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
136 { 0, } \
137 }
138
139#include "t4_pci_id_tbl.h"
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000140
/* Firmware image and firmware Configuration File names for T4 and T5
 * adapters, requested via the firmware loader at attach time.
 */
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000153
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 * (Deprecated per its MODULE_PARM_DESC.)
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
		 " parameter");
Vipul Pandya13ee15d2012-09-26 02:39:40 +0000176
/* Per-device default netif message level; see DFLT_MSG_ENABLE above. */
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.  (Deprecated per its MODULE_PARM_DESC.)
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds, deprecated parameter");

/* Queue interrupt packet-count thresholds.  (Deprecated per its
 * MODULE_PARM_DESC.)
 */
static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters, "
		 "deprecated parameter");
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000212
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

/* Enable L2 ACL enforcement on Virtual Functions.  Only exposed as a module
 * parameter when SR-IOV support is built in; deprecated per its
 * MODULE_PARM_DESC.
 */
static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
		 "deprecated parameter");

/* Configure the number of PCI-E Virtual Function which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif
242
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

/* Global compressed-filter (TP) configuration.  Deprecated per its
 * MODULE_PARM_DESC.
 */
static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
		 "deprecated parameter");
Vipul Pandyaf2b7e782012-12-10 09:30:52 +0000259
/* Root of this driver's debugfs hierarchy. */
static struct dentry *cxgb4_debugfs_root;

/* All adapters bound to this driver; uld_mutex serializes ULD (upper-layer
 * driver) registration against this list.
 */
static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
/* Registered upper-layer drivers, indexed by CXGB4_ULD_* id. */
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
269
270static void link_report(struct net_device *dev)
271{
272 if (!netif_carrier_ok(dev))
273 netdev_info(dev, "link down\n");
274 else {
275 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
276
277 const char *s = "10Mbps";
278 const struct port_info *p = netdev_priv(dev);
279
280 switch (p->link_cfg.speed) {
Ben Hutchingse8b39012014-02-23 00:03:24 +0000281 case 10000:
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000282 s = "10Gbps";
283 break;
Ben Hutchingse8b39012014-02-23 00:03:24 +0000284 case 1000:
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000285 s = "1000Mbps";
286 break;
Ben Hutchingse8b39012014-02-23 00:03:24 +0000287 case 100:
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000288 s = "100Mbps";
289 break;
Ben Hutchingse8b39012014-02-23 00:03:24 +0000290 case 40000:
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +0530291 s = "40Gbps";
292 break;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000293 }
294
295 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
296 fc[p->link_cfg.fc]);
297 }
298}
299
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		/* Build the firmware parameter identifying this egress
		 * queue's DCB priority; 0xffffffff clears the mapping.
		 */
		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
					    &name, &value);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}
#endif /* CONFIG_CHELSIO_T4_DCB */
338
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000339void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
340{
341 struct net_device *dev = adapter->port[port_id];
342
343 /* Skip changes from disabled ports. */
344 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
345 if (link_stat)
346 netif_carrier_on(dev);
Anish Bhatt688848b2014-06-19 21:37:13 -0700347 else {
348#ifdef CONFIG_CHELSIO_T4_DCB
349 cxgb4_dcb_state_init(dev);
350 dcb_tx_queue_prio_enable(dev, false);
351#endif /* CONFIG_CHELSIO_T4_DCB */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000352 netif_carrier_off(dev);
Anish Bhatt688848b2014-06-19 21:37:13 -0700353 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000354
355 link_report(dev);
356 }
357}
358
359void t4_os_portmod_changed(const struct adapter *adap, int port_id)
360{
361 static const char *mod_str[] = {
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +0000362 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000363 };
364
365 const struct net_device *dev = adap->port[port_id];
366 const struct port_info *pi = netdev_priv(dev);
367
368 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
369 netdev_info(dev, "port module unplugged\n");
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +0000370 else if (pi->mod_type < ARRAY_SIZE(mod_str))
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000371 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
372}
373
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;			/* accumulated multicast hash bits */
	u64 uhash = 0;			/* accumulated unicast hash bits */
	bool free = true;		/* free existing filters on 1st call */
	u16 filt_idx[7];		/* returned exact-filter indices */
	const u8 *addr[7];		/* batch of up to 7 MAC addresses */
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		/* Flush a batch when it's full or we've seen the last
		 * address.  Only the very first firmware call passes
		 * free == true so stale filters are dropped exactly once.
		 */
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* Install the combined hash for addresses that didn't get exact
	 * filters; hash filtering is enabled only if any unicast address
	 * overflowed into the hash (uhash != 0).
	 */
	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}
423
/* Doorbell FIFO interrupt threshold; non-static because it is referenced
 * from other files in this driver.
 */
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
435
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000436/*
437 * Set Rx properties of a port, such as promiscruity, address filters, and MTU.
438 * If @mtu is -1 it is left unchanged.
439 */
440static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
441{
442 int ret;
443 struct port_info *pi = netdev_priv(dev);
444
445 ret = set_addr_filters(dev, sleep_ok);
446 if (ret == 0)
Dimitris Michailidis060e0c72010-08-02 13:19:21 +0000447 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000448 (dev->flags & IFF_PROMISC) ? 1 : 0,
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +0000449 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000450 sleep_ok);
451 return ret;
452}
453
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 *	Returns 0 on success or a negative firmware error code.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;	/* mailbox for FW commands */

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		/* (Re)write the station MAC address; a non-negative return
		 * value is the exact-match filter index it landed in, which
		 * is cached for subsequent address changes.
		 */
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		/* NOTE(review): bottom halves appear to be disabled here
		 * because enabling the VI can kick off DCB processing in
		 * softirq context -- confirm against t4_enable_vi_params().
		 */
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}
493
/* Report whether Data Center Bridging is operational on a port.  Always 0
 * when DCB support is compiled out.
 */
int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	/* DCB counts as enabled once firmware negotiation has fully
	 * synchronized or the host itself is running DCB.
	 */
	return (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
	       (pi->dcb.state == CXGB4_DCB_STATE_HOST);
#else
	return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);
509
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware.  Feeds the
 * message into the DCB state machine and, if the port's effective DCB state
 * flipped as a result, reprograms the TX queue priority mappings.
 */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[port];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
530
/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}
551
552/* Handle a filter write/deletion reply.
553 */
554static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
555{
556 unsigned int idx = GET_TID(rpl);
557 unsigned int nidx = idx - adap->tids.ftid_base;
558 unsigned int ret;
559 struct filter_entry *f;
560
561 if (idx >= adap->tids.ftid_base && nidx <
562 (adap->tids.nftids + adap->tids.nsftids)) {
563 idx = nidx;
Hariprasad Shenaibdc590b2015-01-08 21:38:16 -0800564 ret = TCB_COOKIE_G(rpl->cookie);
Vipul Pandyaf2b7e782012-12-10 09:30:52 +0000565 f = &adap->tids.ftid_tab[idx];
566
567 if (ret == FW_FILTER_WR_FLT_DELETED) {
568 /* Clear the filter when we get confirmation from the
569 * hardware that the filter has been deleted.
570 */
571 clear_filter(adap, f);
572 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
573 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
574 idx);
575 clear_filter(adap, f);
576 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
577 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
578 f->pending = 0; /* asynchronous setup completed */
579 f->valid = 1;
580 } else {
581 /* Something went wrong. Issue a warning about the
582 * problem and clear everything out.
583 */
584 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
585 idx, ret);
586 clear_filter(adap, f);
587 }
588 }
589}
590
/* Response queue handler for the FW event queue.  Dispatches egress-queue
 * restart notifications, firmware messages (including DCB updates), L2T
 * write replies and filter (SET_TCB) replies.  Always returns 0.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		/* Unwrap: skip the FW4 header and the inner RSS header. */
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		/* Queues located before sge.ofldtxq in the sge struct are
		 * Ethernet TX queues; everything at/after it is offload.
		 */
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		/* Feed firmware DCBX enable/disable notifications into the
		 * per-port DCB state machine.
		 */
		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev = q->adap->port[port];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS_F)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		/* NOTE: the "else" below deliberately binds to the
		 * following "if (p->type == 0)" so that L2 DCB config
		 * messages are not also passed to t4_handle_fw_rpl().
		 */
		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
675
676/**
677 * uldrx_handler - response queue handler for ULD queues
678 * @q: the response queue that received the packet
679 * @rsp: the response queue descriptor holding the offload message
680 * @gl: the gather list of packet fragments
681 *
682 * Deliver an ingress offload packet to a ULD. All processing is done by
683 * the ULD, we just maintain statistics.
684 */
685static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
686 const struct pkt_gl *gl)
687{
688 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
689
Vipul Pandyab407a4a2013-04-29 04:04:40 +0000690 /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
691 */
692 if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
693 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
694 rsp += 2;
695
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000696 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
697 rxq->stats.nomem++;
698 return -1;
699 }
700 if (gl == NULL)
701 rxq->stats.imm++;
702 else if (gl == CXGB4_MSG_AN)
703 rxq->stats.an++;
704 else
705 rxq->stats.pkts++;
706 return 0;
707}
708
709static void disable_msi(struct adapter *adapter)
710{
711 if (adapter->flags & USING_MSIX) {
712 pci_disable_msix(adapter->pdev);
713 adapter->flags &= ~USING_MSIX;
714 } else if (adapter->flags & USING_MSI) {
715 pci_disable_msi(adapter->pdev);
716 adapter->flags &= ~USING_MSI;
717 }
718}
719
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	/* A PF software interrupt: record it and ack the cause register. */
	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
735
/*
 * Name the MSI-X interrupts.  Vector 0 is the non-data interrupt, vector 1
 * the firmware event queue; data queues (Ethernet, offload, RDMA, RDMA CIQ)
 * start at index 2.  The ordering here must match the request_irq() calls
 * in request_msix_queue_irqs().
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);

	for_each_rdmaciq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
			 adap->port[0]->name, i);
}
773
/*
 * Request one MSI-X vector per response queue: vector 1 serves the
 * firmware event queue and, starting at vector 2, the Ethernet,
 * offload, RDMA and RDMA-CIQ queues follow in order (the same order
 * name_msix_vecs() used to label them).  On any failure every vector
 * acquired so far is released in reverse order.  Vector 0 (non-data
 * interrupts) is requested elsewhere.
 */
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		/* advance only after a successful request so the unwind
		 * path can pair vectors with queues by pre-decrementing */
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	/* Free in the reverse order of acquisition.  Each loop index was
	 * left at the queue whose request failed (or one past the last
	 * successful one for fully-completed groups), so pre-decrementing
	 * both the queue index and msi_index walks the pairs backwards.
	 */
	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
839
840static void free_msix_queue_irqs(struct adapter *adap)
841{
Vipul Pandya404d9e32012-10-08 02:59:43 +0000842 int i, msi_index = 2;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000843 struct sge *s = &adap->sge;
844
845 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
846 for_each_ethrxq(s, i)
Vipul Pandya404d9e32012-10-08 02:59:43 +0000847 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000848 for_each_ofldrxq(s, i)
Vipul Pandya404d9e32012-10-08 02:59:43 +0000849 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000850 for_each_rdmarxq(s, i)
Vipul Pandya404d9e32012-10-08 02:59:43 +0000851 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
Hariprasad Shenaicf38be62014-06-06 21:40:42 +0530852 for_each_rdmaciq(s, i)
853 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000854}
855
856/**
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000857 * write_rss - write the RSS table for a given port
858 * @pi: the port
859 * @queues: array of queue indices for RSS
860 *
861 * Sets up the portion of the HW RSS table for the port's VI to distribute
862 * packets to the Rx queues in @queues.
863 */
864static int write_rss(const struct port_info *pi, const u16 *queues)
865{
866 u16 *rss;
867 int i, err;
868 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
869
870 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
871 if (!rss)
872 return -ENOMEM;
873
874 /* map the queue indices to queue ids */
875 for (i = 0; i < pi->rss_size; i++, queues++)
876 rss[i] = q[*queues].rspq.abs_id;
877
Dimitris Michailidis060e0c72010-08-02 13:19:21 +0000878 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
879 pi->rss_size, rss, pi->rss_size);
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000880 kfree(rss);
881 return err;
882}
883
884/**
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000885 * setup_rss - configure RSS
886 * @adap: the adapter
887 *
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000888 * Sets up RSS for each port.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000889 */
890static int setup_rss(struct adapter *adap)
891{
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000892 int i, err;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000893
894 for_each_port(adap, i) {
895 const struct port_info *pi = adap2pinfo(adap, i);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000896
Dimitris Michailidis671b0062010-07-11 12:01:17 +0000897 err = write_rss(pi, pi->rss);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000898 if (err)
899 return err;
900 }
901 return 0;
902}
903
904/*
Dimitris Michailidise46dab42010-08-23 17:20:58 +0000905 * Return the channel of the ingress queue with the given qid.
906 */
907static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
908{
909 qid -= p->ingr_start;
910 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
911}
912
913/*
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000914 * Wait until all NAPI handlers are descheduled.
915 */
916static void quiesce_rx(struct adapter *adap)
917{
918 int i;
919
920 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
921 struct sge_rspq *q = adap->sge.ingr_map[i];
922
923 if (q && q->handler)
924 napi_disable(&q->napi);
925 }
926}
927
928/*
929 * Enable NAPI scheduling and interrupt generation for all Rx queues.
930 */
931static void enable_rx(struct adapter *adap)
932{
933 int i;
934
935 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
936 struct sge_rspq *q = adap->sge.ingr_map[i];
937
938 if (!q)
939 continue;
940 if (q->handler)
941 napi_enable(&q->napi);
942 /* 0-increment GTS to start the timer and enable interrupts */
Hariprasad Shenaif612b812015-01-05 16:30:43 +0530943 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
944 SEINTARM_V(q->intr_params) |
945 INGRESSQID_V(q->cntxt_id));
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +0000946 }
947}
948
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 *
 *	On any allocation failure all SGE resources allocated so far are
 *	freed and the error is returned.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		/* Without MSI-X all response queues forward to a single
		 * interrupt queue; a negative msi_idx encodes its abs_id
		 * for t4_sge_alloc_rxq().
		 */
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	/* one Rx and one Tx queue set per port queue set */
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			/* with MSI-X each queue gets its own vector */
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	/* offload Rx/Tx queues, spread evenly across the channels */
	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	/* RDMA Rx queues, one per port */
	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	/* RDMA concentrator (CIQ) queues */
	for_each_rdmaciq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmaciq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_ciq[i] = q->rspq.abs_id;
	}

	/* one control Tx queue per port */
	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	/* point the tracing/RSS control at the first port's first queue;
	 * the register location moved between T4 and T5 */
	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
}
1075
1076/*
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001077 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1078 * The allocated memory is cleared.
1079 */
1080void *t4_alloc_mem(size_t size)
1081{
Joe Perches8be04b92013-06-19 12:15:53 -07001082 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001083
1084 if (!p)
Eric Dumazet89bf67f2010-11-22 00:15:06 +00001085 p = vzalloc(size);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001086 return p;
1087}
1088
/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
	/* kvfree() dispatches to vfree() or kfree() based on
	 * is_vmalloc_addr(), exactly matching the two allocation
	 * paths in t4_alloc_mem(), so the open-coded test is
	 * unnecessary.  NULL is a no-op.
	 */
	kvfree(addr);
}
1099
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001100/* Send a Work Request to write the filter at a specified index. We construct
1101 * a Firmware Filter Work Request to have the work done and put the indicated
1102 * filter into "pending" mode which will prevent any further actions against
1103 * it till we get a reply from the firmware on the completion status of the
1104 * request.
1105 */
1106static int set_filter_wr(struct adapter *adapter, int fidx)
1107{
1108 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1109 struct sk_buff *skb;
1110 struct fw_filter_wr *fwr;
1111 unsigned int ftid;
1112
1113 /* If the new filter requires loopback Destination MAC and/or VLAN
1114 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1115 * the filter.
1116 */
1117 if (f->fs.newdmac || f->fs.newvlan) {
1118 /* allocate L2T entry for new filter */
1119 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1120 if (f->l2t == NULL)
1121 return -EAGAIN;
1122 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1123 f->fs.eport, f->fs.dmac)) {
1124 cxgb4_l2t_release(f->l2t);
1125 f->l2t = NULL;
1126 return -ENOMEM;
1127 }
1128 }
1129
1130 ftid = adapter->tids.ftid_base + fidx;
1131
1132 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1133 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1134 memset(fwr, 0, sizeof(*fwr));
1135
1136 /* It would be nice to put most of the following in t4_hw.c but most
1137 * of the work is translating the cxgbtool ch_filter_specification
1138 * into the Work Request and the definition of that structure is
1139 * currently in cxgbtool.h which isn't appropriate to pull into the
1140 * common code. We may eventually try to come up with a more neutral
1141 * filter specification structure but for now it's easiest to simply
1142 * put this fairly direct code in line ...
1143 */
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301144 fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
1145 fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001146 fwr->tid_to_iq =
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301147 htonl(FW_FILTER_WR_TID_V(ftid) |
1148 FW_FILTER_WR_RQTYPE_V(f->fs.type) |
1149 FW_FILTER_WR_NOREPLY_V(0) |
1150 FW_FILTER_WR_IQ_V(f->fs.iq));
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001151 fwr->del_filter_to_l2tix =
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301152 htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
1153 FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
1154 FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
1155 FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
1156 FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
1157 FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
1158 FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
1159 FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
1160 FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001161 f->fs.newvlan == VLAN_REWRITE) |
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301162 FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001163 f->fs.newvlan == VLAN_REWRITE) |
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301164 FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
1165 FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
1166 FW_FILTER_WR_PRIO_V(f->fs.prio) |
1167 FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001168 fwr->ethtype = htons(f->fs.val.ethtype);
1169 fwr->ethtypem = htons(f->fs.mask.ethtype);
1170 fwr->frag_to_ovlan_vldm =
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301171 (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
1172 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
1173 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
1174 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
1175 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
1176 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001177 fwr->smac_sel = 0;
1178 fwr->rx_chan_rx_rpl_iq =
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301179 htons(FW_FILTER_WR_RX_CHAN_V(0) |
1180 FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001181 fwr->maci_to_matchtypem =
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301182 htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
1183 FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
1184 FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
1185 FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
1186 FW_FILTER_WR_PORT_V(f->fs.val.iport) |
1187 FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
1188 FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
1189 FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00001190 fwr->ptcl = f->fs.val.proto;
1191 fwr->ptclm = f->fs.mask.proto;
1192 fwr->ttyp = f->fs.val.tos;
1193 fwr->ttypm = f->fs.mask.tos;
1194 fwr->ivlan = htons(f->fs.val.ivlan);
1195 fwr->ivlanm = htons(f->fs.mask.ivlan);
1196 fwr->ovlan = htons(f->fs.val.ovlan);
1197 fwr->ovlanm = htons(f->fs.mask.ovlan);
1198 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1199 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1200 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1201 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1202 fwr->lp = htons(f->fs.val.lport);
1203 fwr->lpm = htons(f->fs.mask.lport);
1204 fwr->fp = htons(f->fs.val.fport);
1205 fwr->fpm = htons(f->fs.mask.fport);
1206 if (f->fs.newsmac)
1207 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1208
1209 /* Mark the filter as "pending" and ship off the Filter Work Request.
1210 * When we get the Work Request Reply we'll clear the pending status.
1211 */
1212 f->pending = 1;
1213 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1214 t4_ofld_send(adapter, skb);
1215 return 0;
1216}
1217
1218/* Delete the filter at a specified index.
1219 */
1220static int del_filter_wr(struct adapter *adapter, int fidx)
1221{
1222 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1223 struct sk_buff *skb;
1224 struct fw_filter_wr *fwr;
1225 unsigned int len, ftid;
1226
1227 len = sizeof(*fwr);
1228 ftid = adapter->tids.ftid_base + fidx;
1229
1230 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1231 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1232 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1233
1234 /* Mark the filter as "pending" and ship off the Filter Work Request.
1235 * When we get the Work Request Reply we'll clear the pending status.
1236 */
1237 f->pending = 1;
1238 t4_mgmt_tx(adapter, skb);
1239 return 0;
1240}
1241
/* ndo_select_queue callback: pick the Tx queue for @skb.  With DCB the
 * VLAN priority selects the queue; otherwise an optional Rx-queue-based
 * mapping or the stack's fallback hash is used.
 */
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If a Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev)) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			/* untagged traffic on a DCB link falls back to queue 0 */
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	/* NOTE(review): select_queue is a file-scope knob (presumably a
	 * module parameter defined elsewhere in this file) enabling the
	 * Rx-queue / CPU based mapping below -- confirm against the
	 * definition site.
	 */
	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		/* reduce into range without a (potentially expensive) modulo */
		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}
1283
/* Return non-zero if the adapter is configured for protocol offload. */
static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}
1288
1289/*
1290 * Implementation of ethtool operations.
1291 */
1292
/* ethtool: return the driver's current message-enable bitmap. */
static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}
1297
/* ethtool: set the driver's message-enable bitmap. */
static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}
1302
/* Names of the ethtool statistics reported by get_stats().  The order
 * must match the layout written there: struct port_stats first, then
 * struct queue_port_stats, then the two write-coalescing counters.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK ",
	"TxFramesOK ",
	"TxBroadcastFrames ",
	"TxMulticastFrames ",
	"TxUnicastFrames ",
	"TxErrorFrames ",

	"TxFrames64 ",
	"TxFrames65To127 ",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",

	"TxFramesDropped ",
	"TxPauseFrames ",
	"TxPPP0Frames ",
	"TxPPP1Frames ",
	"TxPPP2Frames ",
	"TxPPP3Frames ",
	"TxPPP4Frames ",
	"TxPPP5Frames ",
	"TxPPP6Frames ",
	"TxPPP7Frames ",

	"RxOctetsOK ",
	"RxFramesOK ",
	"RxBroadcastFrames ",
	"RxMulticastFrames ",
	"RxUnicastFrames ",

	"RxFramesTooLong ",
	"RxJabberErrors ",
	"RxFCSErrors ",
	"RxLengthErrors ",
	"RxSymbolErrors ",
	"RxRuntFrames ",

	"RxFrames64 ",
	"RxFrames65To127 ",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",

	"RxPauseFrames ",
	"RxPPP0Frames ",
	"RxPPP1Frames ",
	"RxPPP2Frames ",
	"RxPPP3Frames ",
	"RxPPP4Frames ",
	"RxPPP5Frames ",
	"RxPPP6Frames ",
	"RxPPP7Frames ",

	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc ",
	"RxBG1FramesTrunc ",
	"RxBG2FramesTrunc ",
	"RxBG3FramesTrunc ",

	"TSO ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"VLANextractions ",
	"VLANinsertions ",
	"GROpackets ",
	"GROmerged ",
	"WriteCoalSuccess ",
	"WriteCoalFail ",
};
1380
1381static int get_sset_count(struct net_device *dev, int sset)
1382{
1383 switch (sset) {
1384 case ETH_SS_STATS:
1385 return ARRAY_SIZE(stats_strings);
1386 default:
1387 return -EOPNOTSUPP;
1388 }
1389}
1390
1391#define T4_REGMAP_SIZE (160 * 1024)
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001392#define T5_REGMAP_SIZE (332 * 1024)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001393
1394static int get_regs_len(struct net_device *dev)
1395{
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001396 struct adapter *adap = netdev2adap(dev);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301397 if (is_t4(adap->params.chip))
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001398 return T4_REGMAP_SIZE;
1399 else
1400 return T5_REGMAP_SIZE;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001401}
1402
/* ethtool: size of the adapter's serial EEPROM. */
static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
1407
/* ethtool: fill in driver name/version, bus info and, when the adapter
 * reported them, the firmware and TP microcode versions.
 */
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	/* fw_vers is 0 until the firmware version has been read back */
	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			"%u.%u.%u.%u, TP %u.%u.%u.%u",
			FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
			FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
			FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
			FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
			FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
			FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
			FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
			FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
}
1429
1430static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1431{
1432 if (stringset == ETH_SS_STATS)
1433 memcpy(data, stats_strings, sizeof(stats_strings));
1434}
1435
/*
 * port stats maintained per queue of the port. They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;	/* sum of per-Tx-queue tso counters */
	u64 tx_csum;	/* Tx checksum offloads (tx_cso) */
	u64 rx_csum;	/* good Rx checksums (rx_cso) */
	u64 vlan_ex;	/* VLAN tag extractions */
	u64 vlan_ins;	/* VLAN tag insertions */
	u64 gro_pkts;	/* GRO/LRO packets (lro_pkts) */
	u64 gro_merged;	/* GRO/LRO merged segments (lro_merged) */
};
1449
1450static void collect_sge_port_stats(const struct adapter *adap,
1451 const struct port_info *p, struct queue_port_stats *s)
1452{
1453 int i;
1454 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1455 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1456
1457 memset(s, 0, sizeof(*s));
1458 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1459 s->tso += tx->tso;
1460 s->tx_csum += tx->tx_cso;
1461 s->rx_csum += rx->stats.rx_cso;
1462 s->vlan_ex += rx->stats.vlan_ex;
1463 s->vlan_ins += tx->vlan_ins;
Dimitris Michailidis4a6346d2010-05-10 15:58:09 +00001464 s->gro_pkts += rx->stats.lro_pkts;
1465 s->gro_merged += rx->stats.lro_merged;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001466 }
1467}
1468
1469static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1470 u64 *data)
1471{
1472 struct port_info *pi = netdev_priv(dev);
1473 struct adapter *adapter = pi->adapter;
Santosh Rastapur22adfe02013-03-14 05:08:51 +00001474 u32 val1, val2;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001475
1476 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1477
1478 data += sizeof(struct port_stats) / sizeof(u64);
1479 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
Santosh Rastapur22adfe02013-03-14 05:08:51 +00001480 data += sizeof(struct queue_port_stats) / sizeof(u64);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301481 if (!is_t4(adapter->params.chip)) {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05301482 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
1483 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
1484 val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
Santosh Rastapur22adfe02013-03-14 05:08:51 +00001485 *data = val1 - val2;
1486 data++;
1487 *data = val2;
1488 data++;
1489 } else {
1490 memset(data, 0, 2 * sizeof(u64));
1491 *data += 2;
1492 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001493}
1494
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	/* register dump version is currently fixed at 1 */
	return CHELSIO_CHIP_VERSION(ap->params.chip) |
		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
1506
1507static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1508 unsigned int end)
1509{
1510 u32 *p = buf + start;
1511
1512 for ( ; start <= end; start += sizeof(u32))
1513 *p++ = t4_read_reg(ap, start);
1514}
1515
1516static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1517 void *buf)
1518{
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001519 static const unsigned int t4_reg_ranges[] = {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001520 0x1008, 0x1108,
1521 0x1180, 0x11b4,
1522 0x11fc, 0x123c,
1523 0x1300, 0x173c,
1524 0x1800, 0x18fc,
1525 0x3000, 0x30d8,
1526 0x30e0, 0x5924,
1527 0x5960, 0x59d4,
1528 0x5a00, 0x5af8,
1529 0x6000, 0x6098,
1530 0x6100, 0x6150,
1531 0x6200, 0x6208,
1532 0x6240, 0x6248,
1533 0x6280, 0x6338,
1534 0x6370, 0x638c,
1535 0x6400, 0x643c,
1536 0x6500, 0x6524,
1537 0x6a00, 0x6a38,
1538 0x6a60, 0x6a78,
1539 0x6b00, 0x6b84,
1540 0x6bf0, 0x6c84,
1541 0x6cf0, 0x6d84,
1542 0x6df0, 0x6e84,
1543 0x6ef0, 0x6f84,
1544 0x6ff0, 0x7084,
1545 0x70f0, 0x7184,
1546 0x71f0, 0x7284,
1547 0x72f0, 0x7384,
1548 0x73f0, 0x7450,
1549 0x7500, 0x7530,
1550 0x7600, 0x761c,
1551 0x7680, 0x76cc,
1552 0x7700, 0x7798,
1553 0x77c0, 0x77fc,
1554 0x7900, 0x79fc,
1555 0x7b00, 0x7c38,
1556 0x7d00, 0x7efc,
1557 0x8dc0, 0x8e1c,
1558 0x8e30, 0x8e78,
1559 0x8ea0, 0x8f6c,
1560 0x8fc0, 0x9074,
1561 0x90fc, 0x90fc,
1562 0x9400, 0x9458,
1563 0x9600, 0x96bc,
1564 0x9800, 0x9808,
1565 0x9820, 0x983c,
1566 0x9850, 0x9864,
1567 0x9c00, 0x9c6c,
1568 0x9c80, 0x9cec,
1569 0x9d00, 0x9d6c,
1570 0x9d80, 0x9dec,
1571 0x9e00, 0x9e6c,
1572 0x9e80, 0x9eec,
1573 0x9f00, 0x9f6c,
1574 0x9f80, 0x9fec,
1575 0xd004, 0xd03c,
1576 0xdfc0, 0xdfe0,
1577 0xe000, 0xea7c,
Hariprasad Shenai3d9103f2014-09-01 19:54:59 +05301578 0xf000, 0x11110,
1579 0x11118, 0x11190,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001580 0x19040, 0x1906c,
1581 0x19078, 0x19080,
1582 0x1908c, 0x19124,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001583 0x19150, 0x191b0,
1584 0x191d0, 0x191e8,
1585 0x19238, 0x1924c,
1586 0x193f8, 0x19474,
1587 0x19490, 0x194f8,
1588 0x19800, 0x19f30,
1589 0x1a000, 0x1a06c,
1590 0x1a0b0, 0x1a120,
1591 0x1a128, 0x1a138,
1592 0x1a190, 0x1a1c4,
1593 0x1a1fc, 0x1a1fc,
1594 0x1e040, 0x1e04c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001595 0x1e284, 0x1e28c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001596 0x1e2c0, 0x1e2c0,
1597 0x1e2e0, 0x1e2e0,
1598 0x1e300, 0x1e384,
1599 0x1e3c0, 0x1e3c8,
1600 0x1e440, 0x1e44c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001601 0x1e684, 0x1e68c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001602 0x1e6c0, 0x1e6c0,
1603 0x1e6e0, 0x1e6e0,
1604 0x1e700, 0x1e784,
1605 0x1e7c0, 0x1e7c8,
1606 0x1e840, 0x1e84c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001607 0x1ea84, 0x1ea8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001608 0x1eac0, 0x1eac0,
1609 0x1eae0, 0x1eae0,
1610 0x1eb00, 0x1eb84,
1611 0x1ebc0, 0x1ebc8,
1612 0x1ec40, 0x1ec4c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001613 0x1ee84, 0x1ee8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001614 0x1eec0, 0x1eec0,
1615 0x1eee0, 0x1eee0,
1616 0x1ef00, 0x1ef84,
1617 0x1efc0, 0x1efc8,
1618 0x1f040, 0x1f04c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001619 0x1f284, 0x1f28c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001620 0x1f2c0, 0x1f2c0,
1621 0x1f2e0, 0x1f2e0,
1622 0x1f300, 0x1f384,
1623 0x1f3c0, 0x1f3c8,
1624 0x1f440, 0x1f44c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001625 0x1f684, 0x1f68c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001626 0x1f6c0, 0x1f6c0,
1627 0x1f6e0, 0x1f6e0,
1628 0x1f700, 0x1f784,
1629 0x1f7c0, 0x1f7c8,
1630 0x1f840, 0x1f84c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001631 0x1fa84, 0x1fa8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001632 0x1fac0, 0x1fac0,
1633 0x1fae0, 0x1fae0,
1634 0x1fb00, 0x1fb84,
1635 0x1fbc0, 0x1fbc8,
1636 0x1fc40, 0x1fc4c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001637 0x1fe84, 0x1fe8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001638 0x1fec0, 0x1fec0,
1639 0x1fee0, 0x1fee0,
1640 0x1ff00, 0x1ff84,
1641 0x1ffc0, 0x1ffc8,
1642 0x20000, 0x2002c,
1643 0x20100, 0x2013c,
1644 0x20190, 0x201c8,
1645 0x20200, 0x20318,
1646 0x20400, 0x20528,
1647 0x20540, 0x20614,
1648 0x21000, 0x21040,
1649 0x2104c, 0x21060,
1650 0x210c0, 0x210ec,
1651 0x21200, 0x21268,
1652 0x21270, 0x21284,
1653 0x212fc, 0x21388,
1654 0x21400, 0x21404,
1655 0x21500, 0x21518,
1656 0x2152c, 0x2153c,
1657 0x21550, 0x21554,
1658 0x21600, 0x21600,
1659 0x21608, 0x21628,
1660 0x21630, 0x2163c,
1661 0x21700, 0x2171c,
1662 0x21780, 0x2178c,
1663 0x21800, 0x21c38,
1664 0x21c80, 0x21d7c,
1665 0x21e00, 0x21e04,
1666 0x22000, 0x2202c,
1667 0x22100, 0x2213c,
1668 0x22190, 0x221c8,
1669 0x22200, 0x22318,
1670 0x22400, 0x22528,
1671 0x22540, 0x22614,
1672 0x23000, 0x23040,
1673 0x2304c, 0x23060,
1674 0x230c0, 0x230ec,
1675 0x23200, 0x23268,
1676 0x23270, 0x23284,
1677 0x232fc, 0x23388,
1678 0x23400, 0x23404,
1679 0x23500, 0x23518,
1680 0x2352c, 0x2353c,
1681 0x23550, 0x23554,
1682 0x23600, 0x23600,
1683 0x23608, 0x23628,
1684 0x23630, 0x2363c,
1685 0x23700, 0x2371c,
1686 0x23780, 0x2378c,
1687 0x23800, 0x23c38,
1688 0x23c80, 0x23d7c,
1689 0x23e00, 0x23e04,
1690 0x24000, 0x2402c,
1691 0x24100, 0x2413c,
1692 0x24190, 0x241c8,
1693 0x24200, 0x24318,
1694 0x24400, 0x24528,
1695 0x24540, 0x24614,
1696 0x25000, 0x25040,
1697 0x2504c, 0x25060,
1698 0x250c0, 0x250ec,
1699 0x25200, 0x25268,
1700 0x25270, 0x25284,
1701 0x252fc, 0x25388,
1702 0x25400, 0x25404,
1703 0x25500, 0x25518,
1704 0x2552c, 0x2553c,
1705 0x25550, 0x25554,
1706 0x25600, 0x25600,
1707 0x25608, 0x25628,
1708 0x25630, 0x2563c,
1709 0x25700, 0x2571c,
1710 0x25780, 0x2578c,
1711 0x25800, 0x25c38,
1712 0x25c80, 0x25d7c,
1713 0x25e00, 0x25e04,
1714 0x26000, 0x2602c,
1715 0x26100, 0x2613c,
1716 0x26190, 0x261c8,
1717 0x26200, 0x26318,
1718 0x26400, 0x26528,
1719 0x26540, 0x26614,
1720 0x27000, 0x27040,
1721 0x2704c, 0x27060,
1722 0x270c0, 0x270ec,
1723 0x27200, 0x27268,
1724 0x27270, 0x27284,
1725 0x272fc, 0x27388,
1726 0x27400, 0x27404,
1727 0x27500, 0x27518,
1728 0x2752c, 0x2753c,
1729 0x27550, 0x27554,
1730 0x27600, 0x27600,
1731 0x27608, 0x27628,
1732 0x27630, 0x2763c,
1733 0x27700, 0x2771c,
1734 0x27780, 0x2778c,
1735 0x27800, 0x27c38,
1736 0x27c80, 0x27d7c,
1737 0x27e00, 0x27e04
1738 };
1739
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001740 static const unsigned int t5_reg_ranges[] = {
1741 0x1008, 0x1148,
1742 0x1180, 0x11b4,
1743 0x11fc, 0x123c,
1744 0x1280, 0x173c,
1745 0x1800, 0x18fc,
1746 0x3000, 0x3028,
1747 0x3060, 0x30d8,
1748 0x30e0, 0x30fc,
1749 0x3140, 0x357c,
1750 0x35a8, 0x35cc,
1751 0x35ec, 0x35ec,
1752 0x3600, 0x5624,
1753 0x56cc, 0x575c,
1754 0x580c, 0x5814,
1755 0x5890, 0x58bc,
1756 0x5940, 0x59dc,
1757 0x59fc, 0x5a18,
1758 0x5a60, 0x5a9c,
1759 0x5b9c, 0x5bfc,
1760 0x6000, 0x6040,
1761 0x6058, 0x614c,
1762 0x7700, 0x7798,
1763 0x77c0, 0x78fc,
1764 0x7b00, 0x7c54,
1765 0x7d00, 0x7efc,
1766 0x8dc0, 0x8de0,
1767 0x8df8, 0x8e84,
1768 0x8ea0, 0x8f84,
1769 0x8fc0, 0x90f8,
1770 0x9400, 0x9470,
1771 0x9600, 0x96f4,
1772 0x9800, 0x9808,
1773 0x9820, 0x983c,
1774 0x9850, 0x9864,
1775 0x9c00, 0x9c6c,
1776 0x9c80, 0x9cec,
1777 0x9d00, 0x9d6c,
1778 0x9d80, 0x9dec,
1779 0x9e00, 0x9e6c,
1780 0x9e80, 0x9eec,
1781 0x9f00, 0x9f6c,
1782 0x9f80, 0xa020,
1783 0xd004, 0xd03c,
1784 0xdfc0, 0xdfe0,
1785 0xe000, 0x11088,
Hariprasad Shenai3d9103f2014-09-01 19:54:59 +05301786 0x1109c, 0x11110,
1787 0x11118, 0x1117c,
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001788 0x11190, 0x11204,
1789 0x19040, 0x1906c,
1790 0x19078, 0x19080,
1791 0x1908c, 0x19124,
1792 0x19150, 0x191b0,
1793 0x191d0, 0x191e8,
1794 0x19238, 0x19290,
1795 0x193f8, 0x19474,
1796 0x19490, 0x194cc,
1797 0x194f0, 0x194f8,
1798 0x19c00, 0x19c60,
1799 0x19c94, 0x19e10,
1800 0x19e50, 0x19f34,
1801 0x19f40, 0x19f50,
1802 0x19f90, 0x19fe4,
1803 0x1a000, 0x1a06c,
1804 0x1a0b0, 0x1a120,
1805 0x1a128, 0x1a138,
1806 0x1a190, 0x1a1c4,
1807 0x1a1fc, 0x1a1fc,
1808 0x1e008, 0x1e00c,
1809 0x1e040, 0x1e04c,
1810 0x1e284, 0x1e290,
1811 0x1e2c0, 0x1e2c0,
1812 0x1e2e0, 0x1e2e0,
1813 0x1e300, 0x1e384,
1814 0x1e3c0, 0x1e3c8,
1815 0x1e408, 0x1e40c,
1816 0x1e440, 0x1e44c,
1817 0x1e684, 0x1e690,
1818 0x1e6c0, 0x1e6c0,
1819 0x1e6e0, 0x1e6e0,
1820 0x1e700, 0x1e784,
1821 0x1e7c0, 0x1e7c8,
1822 0x1e808, 0x1e80c,
1823 0x1e840, 0x1e84c,
1824 0x1ea84, 0x1ea90,
1825 0x1eac0, 0x1eac0,
1826 0x1eae0, 0x1eae0,
1827 0x1eb00, 0x1eb84,
1828 0x1ebc0, 0x1ebc8,
1829 0x1ec08, 0x1ec0c,
1830 0x1ec40, 0x1ec4c,
1831 0x1ee84, 0x1ee90,
1832 0x1eec0, 0x1eec0,
1833 0x1eee0, 0x1eee0,
1834 0x1ef00, 0x1ef84,
1835 0x1efc0, 0x1efc8,
1836 0x1f008, 0x1f00c,
1837 0x1f040, 0x1f04c,
1838 0x1f284, 0x1f290,
1839 0x1f2c0, 0x1f2c0,
1840 0x1f2e0, 0x1f2e0,
1841 0x1f300, 0x1f384,
1842 0x1f3c0, 0x1f3c8,
1843 0x1f408, 0x1f40c,
1844 0x1f440, 0x1f44c,
1845 0x1f684, 0x1f690,
1846 0x1f6c0, 0x1f6c0,
1847 0x1f6e0, 0x1f6e0,
1848 0x1f700, 0x1f784,
1849 0x1f7c0, 0x1f7c8,
1850 0x1f808, 0x1f80c,
1851 0x1f840, 0x1f84c,
1852 0x1fa84, 0x1fa90,
1853 0x1fac0, 0x1fac0,
1854 0x1fae0, 0x1fae0,
1855 0x1fb00, 0x1fb84,
1856 0x1fbc0, 0x1fbc8,
1857 0x1fc08, 0x1fc0c,
1858 0x1fc40, 0x1fc4c,
1859 0x1fe84, 0x1fe90,
1860 0x1fec0, 0x1fec0,
1861 0x1fee0, 0x1fee0,
1862 0x1ff00, 0x1ff84,
1863 0x1ffc0, 0x1ffc8,
1864 0x30000, 0x30030,
1865 0x30100, 0x30144,
1866 0x30190, 0x301d0,
1867 0x30200, 0x30318,
1868 0x30400, 0x3052c,
1869 0x30540, 0x3061c,
1870 0x30800, 0x30834,
1871 0x308c0, 0x30908,
1872 0x30910, 0x309ac,
1873 0x30a00, 0x30a04,
1874 0x30a0c, 0x30a2c,
1875 0x30a44, 0x30a50,
1876 0x30a74, 0x30c24,
1877 0x30d08, 0x30d14,
1878 0x30d1c, 0x30d20,
1879 0x30d3c, 0x30d50,
1880 0x31200, 0x3120c,
1881 0x31220, 0x31220,
1882 0x31240, 0x31240,
1883 0x31600, 0x31600,
1884 0x31608, 0x3160c,
1885 0x31a00, 0x31a1c,
1886 0x31e04, 0x31e20,
1887 0x31e38, 0x31e3c,
1888 0x31e80, 0x31e80,
1889 0x31e88, 0x31ea8,
1890 0x31eb0, 0x31eb4,
1891 0x31ec8, 0x31ed4,
1892 0x31fb8, 0x32004,
1893 0x32208, 0x3223c,
1894 0x32600, 0x32630,
1895 0x32a00, 0x32abc,
1896 0x32b00, 0x32b70,
1897 0x33000, 0x33048,
1898 0x33060, 0x3309c,
1899 0x330f0, 0x33148,
1900 0x33160, 0x3319c,
1901 0x331f0, 0x332e4,
1902 0x332f8, 0x333e4,
1903 0x333f8, 0x33448,
1904 0x33460, 0x3349c,
1905 0x334f0, 0x33548,
1906 0x33560, 0x3359c,
1907 0x335f0, 0x336e4,
1908 0x336f8, 0x337e4,
1909 0x337f8, 0x337fc,
1910 0x33814, 0x33814,
1911 0x3382c, 0x3382c,
1912 0x33880, 0x3388c,
1913 0x338e8, 0x338ec,
1914 0x33900, 0x33948,
1915 0x33960, 0x3399c,
1916 0x339f0, 0x33ae4,
1917 0x33af8, 0x33b10,
1918 0x33b28, 0x33b28,
1919 0x33b3c, 0x33b50,
1920 0x33bf0, 0x33c10,
1921 0x33c28, 0x33c28,
1922 0x33c3c, 0x33c50,
1923 0x33cf0, 0x33cfc,
1924 0x34000, 0x34030,
1925 0x34100, 0x34144,
1926 0x34190, 0x341d0,
1927 0x34200, 0x34318,
1928 0x34400, 0x3452c,
1929 0x34540, 0x3461c,
1930 0x34800, 0x34834,
1931 0x348c0, 0x34908,
1932 0x34910, 0x349ac,
1933 0x34a00, 0x34a04,
1934 0x34a0c, 0x34a2c,
1935 0x34a44, 0x34a50,
1936 0x34a74, 0x34c24,
1937 0x34d08, 0x34d14,
1938 0x34d1c, 0x34d20,
1939 0x34d3c, 0x34d50,
1940 0x35200, 0x3520c,
1941 0x35220, 0x35220,
1942 0x35240, 0x35240,
1943 0x35600, 0x35600,
1944 0x35608, 0x3560c,
1945 0x35a00, 0x35a1c,
1946 0x35e04, 0x35e20,
1947 0x35e38, 0x35e3c,
1948 0x35e80, 0x35e80,
1949 0x35e88, 0x35ea8,
1950 0x35eb0, 0x35eb4,
1951 0x35ec8, 0x35ed4,
1952 0x35fb8, 0x36004,
1953 0x36208, 0x3623c,
1954 0x36600, 0x36630,
1955 0x36a00, 0x36abc,
1956 0x36b00, 0x36b70,
1957 0x37000, 0x37048,
1958 0x37060, 0x3709c,
1959 0x370f0, 0x37148,
1960 0x37160, 0x3719c,
1961 0x371f0, 0x372e4,
1962 0x372f8, 0x373e4,
1963 0x373f8, 0x37448,
1964 0x37460, 0x3749c,
1965 0x374f0, 0x37548,
1966 0x37560, 0x3759c,
1967 0x375f0, 0x376e4,
1968 0x376f8, 0x377e4,
1969 0x377f8, 0x377fc,
1970 0x37814, 0x37814,
1971 0x3782c, 0x3782c,
1972 0x37880, 0x3788c,
1973 0x378e8, 0x378ec,
1974 0x37900, 0x37948,
1975 0x37960, 0x3799c,
1976 0x379f0, 0x37ae4,
1977 0x37af8, 0x37b10,
1978 0x37b28, 0x37b28,
1979 0x37b3c, 0x37b50,
1980 0x37bf0, 0x37c10,
1981 0x37c28, 0x37c28,
1982 0x37c3c, 0x37c50,
1983 0x37cf0, 0x37cfc,
1984 0x38000, 0x38030,
1985 0x38100, 0x38144,
1986 0x38190, 0x381d0,
1987 0x38200, 0x38318,
1988 0x38400, 0x3852c,
1989 0x38540, 0x3861c,
1990 0x38800, 0x38834,
1991 0x388c0, 0x38908,
1992 0x38910, 0x389ac,
1993 0x38a00, 0x38a04,
1994 0x38a0c, 0x38a2c,
1995 0x38a44, 0x38a50,
1996 0x38a74, 0x38c24,
1997 0x38d08, 0x38d14,
1998 0x38d1c, 0x38d20,
1999 0x38d3c, 0x38d50,
2000 0x39200, 0x3920c,
2001 0x39220, 0x39220,
2002 0x39240, 0x39240,
2003 0x39600, 0x39600,
2004 0x39608, 0x3960c,
2005 0x39a00, 0x39a1c,
2006 0x39e04, 0x39e20,
2007 0x39e38, 0x39e3c,
2008 0x39e80, 0x39e80,
2009 0x39e88, 0x39ea8,
2010 0x39eb0, 0x39eb4,
2011 0x39ec8, 0x39ed4,
2012 0x39fb8, 0x3a004,
2013 0x3a208, 0x3a23c,
2014 0x3a600, 0x3a630,
2015 0x3aa00, 0x3aabc,
2016 0x3ab00, 0x3ab70,
2017 0x3b000, 0x3b048,
2018 0x3b060, 0x3b09c,
2019 0x3b0f0, 0x3b148,
2020 0x3b160, 0x3b19c,
2021 0x3b1f0, 0x3b2e4,
2022 0x3b2f8, 0x3b3e4,
2023 0x3b3f8, 0x3b448,
2024 0x3b460, 0x3b49c,
2025 0x3b4f0, 0x3b548,
2026 0x3b560, 0x3b59c,
2027 0x3b5f0, 0x3b6e4,
2028 0x3b6f8, 0x3b7e4,
2029 0x3b7f8, 0x3b7fc,
2030 0x3b814, 0x3b814,
2031 0x3b82c, 0x3b82c,
2032 0x3b880, 0x3b88c,
2033 0x3b8e8, 0x3b8ec,
2034 0x3b900, 0x3b948,
2035 0x3b960, 0x3b99c,
2036 0x3b9f0, 0x3bae4,
2037 0x3baf8, 0x3bb10,
2038 0x3bb28, 0x3bb28,
2039 0x3bb3c, 0x3bb50,
2040 0x3bbf0, 0x3bc10,
2041 0x3bc28, 0x3bc28,
2042 0x3bc3c, 0x3bc50,
2043 0x3bcf0, 0x3bcfc,
2044 0x3c000, 0x3c030,
2045 0x3c100, 0x3c144,
2046 0x3c190, 0x3c1d0,
2047 0x3c200, 0x3c318,
2048 0x3c400, 0x3c52c,
2049 0x3c540, 0x3c61c,
2050 0x3c800, 0x3c834,
2051 0x3c8c0, 0x3c908,
2052 0x3c910, 0x3c9ac,
2053 0x3ca00, 0x3ca04,
2054 0x3ca0c, 0x3ca2c,
2055 0x3ca44, 0x3ca50,
2056 0x3ca74, 0x3cc24,
2057 0x3cd08, 0x3cd14,
2058 0x3cd1c, 0x3cd20,
2059 0x3cd3c, 0x3cd50,
2060 0x3d200, 0x3d20c,
2061 0x3d220, 0x3d220,
2062 0x3d240, 0x3d240,
2063 0x3d600, 0x3d600,
2064 0x3d608, 0x3d60c,
2065 0x3da00, 0x3da1c,
2066 0x3de04, 0x3de20,
2067 0x3de38, 0x3de3c,
2068 0x3de80, 0x3de80,
2069 0x3de88, 0x3dea8,
2070 0x3deb0, 0x3deb4,
2071 0x3dec8, 0x3ded4,
2072 0x3dfb8, 0x3e004,
2073 0x3e208, 0x3e23c,
2074 0x3e600, 0x3e630,
2075 0x3ea00, 0x3eabc,
2076 0x3eb00, 0x3eb70,
2077 0x3f000, 0x3f048,
2078 0x3f060, 0x3f09c,
2079 0x3f0f0, 0x3f148,
2080 0x3f160, 0x3f19c,
2081 0x3f1f0, 0x3f2e4,
2082 0x3f2f8, 0x3f3e4,
2083 0x3f3f8, 0x3f448,
2084 0x3f460, 0x3f49c,
2085 0x3f4f0, 0x3f548,
2086 0x3f560, 0x3f59c,
2087 0x3f5f0, 0x3f6e4,
2088 0x3f6f8, 0x3f7e4,
2089 0x3f7f8, 0x3f7fc,
2090 0x3f814, 0x3f814,
2091 0x3f82c, 0x3f82c,
2092 0x3f880, 0x3f88c,
2093 0x3f8e8, 0x3f8ec,
2094 0x3f900, 0x3f948,
2095 0x3f960, 0x3f99c,
2096 0x3f9f0, 0x3fae4,
2097 0x3faf8, 0x3fb10,
2098 0x3fb28, 0x3fb28,
2099 0x3fb3c, 0x3fb50,
2100 0x3fbf0, 0x3fc10,
2101 0x3fc28, 0x3fc28,
2102 0x3fc3c, 0x3fc50,
2103 0x3fcf0, 0x3fcfc,
2104 0x40000, 0x4000c,
2105 0x40040, 0x40068,
2106 0x40080, 0x40144,
2107 0x40180, 0x4018c,
2108 0x40200, 0x40298,
2109 0x402ac, 0x4033c,
2110 0x403f8, 0x403fc,
Kumar Sanghvic1f49e32014-02-18 17:56:13 +05302111 0x41304, 0x413c4,
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002112 0x41400, 0x4141c,
2113 0x41480, 0x414d0,
2114 0x44000, 0x44078,
2115 0x440c0, 0x44278,
2116 0x442c0, 0x44478,
2117 0x444c0, 0x44678,
2118 0x446c0, 0x44878,
2119 0x448c0, 0x449fc,
2120 0x45000, 0x45068,
2121 0x45080, 0x45084,
2122 0x450a0, 0x450b0,
2123 0x45200, 0x45268,
2124 0x45280, 0x45284,
2125 0x452a0, 0x452b0,
2126 0x460c0, 0x460e4,
2127 0x47000, 0x4708c,
2128 0x47200, 0x47250,
2129 0x47400, 0x47420,
2130 0x47600, 0x47618,
2131 0x47800, 0x47814,
2132 0x48000, 0x4800c,
2133 0x48040, 0x48068,
2134 0x48080, 0x48144,
2135 0x48180, 0x4818c,
2136 0x48200, 0x48298,
2137 0x482ac, 0x4833c,
2138 0x483f8, 0x483fc,
Kumar Sanghvic1f49e32014-02-18 17:56:13 +05302139 0x49304, 0x493c4,
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002140 0x49400, 0x4941c,
2141 0x49480, 0x494d0,
2142 0x4c000, 0x4c078,
2143 0x4c0c0, 0x4c278,
2144 0x4c2c0, 0x4c478,
2145 0x4c4c0, 0x4c678,
2146 0x4c6c0, 0x4c878,
2147 0x4c8c0, 0x4c9fc,
2148 0x4d000, 0x4d068,
2149 0x4d080, 0x4d084,
2150 0x4d0a0, 0x4d0b0,
2151 0x4d200, 0x4d268,
2152 0x4d280, 0x4d284,
2153 0x4d2a0, 0x4d2b0,
2154 0x4e0c0, 0x4e0e4,
2155 0x4f000, 0x4f08c,
2156 0x4f200, 0x4f250,
2157 0x4f400, 0x4f420,
2158 0x4f600, 0x4f618,
2159 0x4f800, 0x4f814,
2160 0x50000, 0x500cc,
2161 0x50400, 0x50400,
2162 0x50800, 0x508cc,
2163 0x50c00, 0x50c00,
2164 0x51000, 0x5101c,
2165 0x51300, 0x51308,
2166 };
2167
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002168 int i;
2169 struct adapter *ap = netdev2adap(dev);
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002170 static const unsigned int *reg_ranges;
2171 int arr_size = 0, buf_size = 0;
2172
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302173 if (is_t4(ap->params.chip)) {
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002174 reg_ranges = &t4_reg_ranges[0];
2175 arr_size = ARRAY_SIZE(t4_reg_ranges);
2176 buf_size = T4_REGMAP_SIZE;
2177 } else {
2178 reg_ranges = &t5_reg_ranges[0];
2179 arr_size = ARRAY_SIZE(t5_reg_ranges);
2180 buf_size = T5_REGMAP_SIZE;
2181 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002182
2183 regs->version = mk_adap_vers(ap);
2184
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002185 memset(buf, 0, buf_size);
2186 for (i = 0; i < arr_size; i += 2)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002187 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2188}
2189
2190static int restart_autoneg(struct net_device *dev)
2191{
2192 struct port_info *p = netdev_priv(dev);
2193
2194 if (!netif_running(dev))
2195 return -EAGAIN;
2196 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2197 return -EINVAL;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002198 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002199 return 0;
2200}
2201
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002202static int identify_port(struct net_device *dev,
2203 enum ethtool_phys_id_state state)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002204{
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002205 unsigned int val;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002206 struct adapter *adap = netdev2adap(dev);
2207
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002208 if (state == ETHTOOL_ID_ACTIVE)
2209 val = 0xffff;
2210 else if (state == ETHTOOL_ID_INACTIVE)
2211 val = 0;
2212 else
2213 return -EINVAL;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002214
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002215 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002216}
2217
Hariprasad Shenai40e9de42014-12-12 12:07:57 +05302218static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002219{
2220 unsigned int v = 0;
2221
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002222 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2223 type == FW_PORT_TYPE_BT_XAUI) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002224 v |= SUPPORTED_TP;
2225 if (caps & FW_PORT_CAP_SPEED_100M)
2226 v |= SUPPORTED_100baseT_Full;
2227 if (caps & FW_PORT_CAP_SPEED_1G)
2228 v |= SUPPORTED_1000baseT_Full;
2229 if (caps & FW_PORT_CAP_SPEED_10G)
2230 v |= SUPPORTED_10000baseT_Full;
2231 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2232 v |= SUPPORTED_Backplane;
2233 if (caps & FW_PORT_CAP_SPEED_1G)
2234 v |= SUPPORTED_1000baseKX_Full;
2235 if (caps & FW_PORT_CAP_SPEED_10G)
2236 v |= SUPPORTED_10000baseKX4_Full;
2237 } else if (type == FW_PORT_TYPE_KR)
2238 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002239 else if (type == FW_PORT_TYPE_BP_AP)
Dimitris Michailidis7d5e77a2010-12-14 21:36:47 +00002240 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2241 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2242 else if (type == FW_PORT_TYPE_BP4_AP)
2243 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2244 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2245 SUPPORTED_10000baseKX4_Full;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002246 else if (type == FW_PORT_TYPE_FIBER_XFI ||
Hariprasad Shenai40e9de42014-12-12 12:07:57 +05302247 type == FW_PORT_TYPE_FIBER_XAUI ||
2248 type == FW_PORT_TYPE_SFP ||
2249 type == FW_PORT_TYPE_QSFP_10G ||
2250 type == FW_PORT_TYPE_QSA) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002251 v |= SUPPORTED_FIBRE;
Hariprasad Shenai4c2d5182014-11-28 18:35:14 +05302252 if (caps & FW_PORT_CAP_SPEED_1G)
2253 v |= SUPPORTED_1000baseT_Full;
2254 if (caps & FW_PORT_CAP_SPEED_10G)
2255 v |= SUPPORTED_10000baseT_Full;
Hariprasad Shenai40e9de42014-12-12 12:07:57 +05302256 } else if (type == FW_PORT_TYPE_BP40_BA ||
2257 type == FW_PORT_TYPE_QSFP) {
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302258 v |= SUPPORTED_40000baseSR4_Full;
Hariprasad Shenai40e9de42014-12-12 12:07:57 +05302259 v |= SUPPORTED_FIBRE;
2260 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002261
2262 if (caps & FW_PORT_CAP_ANEG)
2263 v |= SUPPORTED_Autoneg;
2264 return v;
2265}
2266
2267static unsigned int to_fw_linkcaps(unsigned int caps)
2268{
2269 unsigned int v = 0;
2270
2271 if (caps & ADVERTISED_100baseT_Full)
2272 v |= FW_PORT_CAP_SPEED_100M;
2273 if (caps & ADVERTISED_1000baseT_Full)
2274 v |= FW_PORT_CAP_SPEED_1G;
2275 if (caps & ADVERTISED_10000baseT_Full)
2276 v |= FW_PORT_CAP_SPEED_10G;
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302277 if (caps & ADVERTISED_40000baseSR4_Full)
2278 v |= FW_PORT_CAP_SPEED_40G;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002279 return v;
2280}
2281
/* ethtool get_settings handler: report link settings for the port.
 * Maps the firmware's port/module type onto an ethtool PORT_* connector
 * type, fills in MDIO addressing, and reports supported/advertised link
 * modes derived from the firmware capability words.
 */
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	/* BASE-T ports are twisted pair */
	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP ||
		 p->port_type == FW_PORT_TYPE_QSFP_10G ||
		 p->port_type == FW_PORT_TYPE_QSA ||
		 p->port_type == FW_PORT_TYPE_QSFP) {
		/* Pluggable modules: connector type depends on what module
		 * is actually inserted (optical vs. twinax direct attach).
		 */
		if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
		    p->mod_type == FW_PORT_MOD_TYPE_SR ||
		    p->mod_type == FW_PORT_MOD_TYPE_ER ||
		    p->mod_type == FW_PORT_MOD_TYPE_LRM)
			cmd->port = PORT_FIBRE;
		else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
			 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_OTHER;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		/* BASE-T SGMII PHYs use Clause 22 MDIO, everything else
		 * uses Clause 45.
		 */
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;  /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	/* Speed is only meaningful while the carrier is up */
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;	/* hardware is always full duplex */
	cmd->autoneg = p->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
2332
2333static unsigned int speed_to_caps(int speed)
2334{
Ben Hutchingse8b39012014-02-23 00:03:24 +00002335 if (speed == 100)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002336 return FW_PORT_CAP_SPEED_100M;
Ben Hutchingse8b39012014-02-23 00:03:24 +00002337 if (speed == 1000)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002338 return FW_PORT_CAP_SPEED_1G;
Ben Hutchingse8b39012014-02-23 00:03:24 +00002339 if (speed == 10000)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002340 return FW_PORT_CAP_SPEED_10G;
Ben Hutchingse8b39012014-02-23 00:03:24 +00002341 if (speed == 40000)
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302342 return FW_PORT_CAP_SPEED_40G;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002343 return 0;
2344}
2345
/* ethtool set_settings handler: validate and apply requested link
 * parameters (speed, duplex, autoneg).  Changes take effect immediately
 * when the interface is running, otherwise they are only recorded in the
 * port's link_config for the next link bring-up.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(speed);

		/* NOTE(review): with autoneg disabled only 100Mb/s may be
		 * forced; 1G/10G/40G are rejected here even when the
		 * capability bit is supported — presumably those speeds
		 * require autonegotiation.  Confirm against firmware docs
		 * before relaxing this check.
		 */
		if (!(lc->supported & cap) ||
		    (speed == 1000) ||
		    (speed == 10000) ||
		    (speed == 40000))
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		/* advertise the requested modes plus autoneg itself */
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}
2391
2392static void get_pauseparam(struct net_device *dev,
2393 struct ethtool_pauseparam *epause)
2394{
2395 struct port_info *p = netdev_priv(dev);
2396
2397 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2398 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2399 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2400}
2401
2402static int set_pauseparam(struct net_device *dev,
2403 struct ethtool_pauseparam *epause)
2404{
2405 struct port_info *p = netdev_priv(dev);
2406 struct link_config *lc = &p->link_cfg;
2407
2408 if (epause->autoneg == AUTONEG_DISABLE)
2409 lc->requested_fc = 0;
2410 else if (lc->supported & FW_PORT_CAP_ANEG)
2411 lc->requested_fc = PAUSE_AUTONEG;
2412 else
2413 return -EINVAL;
2414
2415 if (epause->rx_pause)
2416 lc->requested_fc |= PAUSE_RX;
2417 if (epause->tx_pause)
2418 lc->requested_fc |= PAUSE_TX;
2419 if (netif_running(dev))
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002420 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2421 lc);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002422 return 0;
2423}
2424
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002425static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2426{
2427 const struct port_info *pi = netdev_priv(dev);
2428 const struct sge *s = &pi->adapter->sge;
2429
2430 e->rx_max_pending = MAX_RX_BUFFERS;
2431 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2432 e->rx_jumbo_max_pending = 0;
2433 e->tx_max_pending = MAX_TXQ_ENTRIES;
2434
2435 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2436 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2437 e->rx_jumbo_pending = 0;
2438 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2439}
2440
2441static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2442{
2443 int i;
2444 const struct port_info *pi = netdev_priv(dev);
2445 struct adapter *adapter = pi->adapter;
2446 struct sge *s = &adapter->sge;
2447
2448 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2449 e->tx_pending > MAX_TXQ_ENTRIES ||
2450 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2451 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2452 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2453 return -EINVAL;
2454
2455 if (adapter->flags & FULL_INIT_DONE)
2456 return -EBUSY;
2457
2458 for (i = 0; i < pi->nqsets; ++i) {
2459 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2460 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2461 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2462 }
2463 return 0;
2464}
2465
2466static int closest_timer(const struct sge *s, int time)
2467{
2468 int i, delta, match = 0, min_delta = INT_MAX;
2469
2470 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2471 delta = time - s->timer_val[i];
2472 if (delta < 0)
2473 delta = -delta;
2474 if (delta < min_delta) {
2475 min_delta = delta;
2476 match = i;
2477 }
2478 }
2479 return match;
2480}
2481
2482static int closest_thres(const struct sge *s, int thres)
2483{
2484 int i, delta, match = 0, min_delta = INT_MAX;
2485
2486 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2487 delta = thres - s->counter_val[i];
2488 if (delta < 0)
2489 delta = -delta;
2490 if (delta < min_delta) {
2491 min_delta = delta;
2492 match = i;
2493 }
2494 }
2495 return match;
2496}
2497
2498/*
2499 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2500 */
2501static unsigned int qtimer_val(const struct adapter *adap,
2502 const struct sge_rspq *q)
2503{
2504 unsigned int idx = q->intr_params >> 1;
2505
2506 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2507}
2508
2509/**
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302510 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002511 * @q: the Rx queue
2512 * @us: the hold-off time in us, or 0 to disable timer
2513 * @cnt: the hold-off packet count, or 0 to disable counter
2514 *
2515 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2516 * one of the two needs to be enabled for the queue to generate interrupts.
2517 */
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302518static int set_rspq_intr_params(struct sge_rspq *q,
2519 unsigned int us, unsigned int cnt)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002520{
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302521 struct adapter *adap = q->adap;
2522
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002523 if ((us | cnt) == 0)
2524 cnt = 1;
2525
2526 if (cnt) {
2527 int err;
2528 u32 v, new_idx;
2529
2530 new_idx = closest_thres(&adap->sge, cnt);
2531 if (q->desc && q->pktcnt_idx != new_idx) {
2532 /* the queue has already been created, update it */
Hariprasad Shenai51678652014-11-21 12:52:02 +05302533 v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
2534 FW_PARAMS_PARAM_X_V(
2535 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2536 FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002537 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2538 &new_idx);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002539 if (err)
2540 return err;
2541 }
2542 q->pktcnt_idx = new_idx;
2543 }
2544
2545 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2546 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2547 return 0;
2548}
2549
/**
 *	set_rx_intr_params - set a net device's RX interrupt holdoff parameters
 *	@dev: the network device
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Set the RX interrupt hold-off parameters for a network device: the
 *	same timer/count settings are applied to every Rx queue of the
 *	device's port; the first failure aborts and is returned.
 */
static int set_rx_intr_params(struct net_device *dev,
			      unsigned int us, unsigned int cnt)
{
	int i, err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	for (i = 0; i < pi->nqsets; i++, q++) {
		err = set_rspq_intr_params(&q->rspq, us, cnt);
		if (err)
			return err;
	}
	return 0;
}
2573
Hariprasad Shenaie553ec32014-09-26 00:23:55 +05302574static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
2575{
2576 int i;
2577 struct port_info *pi = netdev_priv(dev);
2578 struct adapter *adap = pi->adapter;
2579 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2580
2581 for (i = 0; i < pi->nqsets; i++, q++)
2582 q->rspq.adaptive_rx = adaptive_rx;
2583
2584 return 0;
2585}
2586
2587static int get_adaptive_rx_setting(struct net_device *dev)
2588{
2589 struct port_info *pi = netdev_priv(dev);
2590 struct adapter *adap = pi->adapter;
2591 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2592
2593 return q->rspq.adaptive_rx;
2594}
2595
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002596static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2597{
Hariprasad Shenaie553ec32014-09-26 00:23:55 +05302598 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302599 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2600 c->rx_max_coalesced_frames);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002601}
2602
2603static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2604{
2605 const struct port_info *pi = netdev_priv(dev);
2606 const struct adapter *adap = pi->adapter;
2607 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2608
2609 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2610 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2611 adap->sge.counter_val[rq->pktcnt_idx] : 0;
Hariprasad Shenaie553ec32014-09-26 00:23:55 +05302612 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002613 return 0;
2614}
2615
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002616/**
2617 * eeprom_ptov - translate a physical EEPROM address to virtual
2618 * @phys_addr: the physical EEPROM address
2619 * @fn: the PCI function number
2620 * @sz: size of function-specific area
2621 *
2622 * Translate a physical EEPROM address to virtual. The first 1K is
2623 * accessed through virtual addresses starting at 31K, the rest is
2624 * accessed through virtual addresses starting at 0.
2625 *
2626 * The mapping is as follows:
2627 * [0..1K) -> [31K..32K)
2628 * [1K..1K+A) -> [31K-A..31K)
2629 * [1K+A..ES) -> [0..ES-A-1K)
2630 *
2631 * where A = @fn * @sz, and ES = EEPROM size.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002632 */
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002633static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002634{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002635 fn *= sz;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002636 if (phys_addr < 1024)
2637 return phys_addr + (31 << 10);
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002638 if (phys_addr < 1024 + fn)
2639 return 31744 - fn + phys_addr - 1024;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002640 if (phys_addr < EEPROMSIZE)
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002641 return phys_addr - 1024 - fn;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002642 return -EINVAL;
2643}
2644
2645/*
2646 * The next two routines implement eeprom read/write from physical addresses.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002647 */
2648static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2649{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002650 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002651
2652 if (vaddr >= 0)
2653 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2654 return vaddr < 0 ? vaddr : 0;
2655}
2656
2657static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2658{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002659 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002660
2661 if (vaddr >= 0)
2662 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2663 return vaddr < 0 ? vaddr : 0;
2664}
2665
2666#define EEPROM_MAGIC 0x38E2F10C
2667
2668static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2669 u8 *data)
2670{
2671 int i, err = 0;
2672 struct adapter *adapter = netdev2adap(dev);
2673
2674 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2675 if (!buf)
2676 return -ENOMEM;
2677
2678 e->magic = EEPROM_MAGIC;
2679 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2680 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2681
2682 if (!err)
2683 memcpy(data, buf + e->offset, e->len);
2684 kfree(buf);
2685 return err;
2686}
2687
/* ethtool set_eeprom handler: write @eeprom->len bytes from @data into the
 * serial EEPROM at @eeprom->offset.  Writes are word sized, so an unaligned
 * first or last word is handled with a read-modify-write, and the EEPROM
 * write protection is lifted only for the duration of the update.
 */
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* Round the requested window out to full 4-byte words. */
	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->fn > 0) {
		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

		/* Non-primary PCI functions may only write within their own
		 * function-specific area of the EEPROM.
		 */
		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;	/* already word aligned, write in place */

	err = t4_seeprom_wp(adapter, false);	/* lift write protection */
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);	/* re-arm protection */
out:
	if (buf != data)
		kfree(buf);
	return err;
}
2744
2745static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2746{
2747 int ret;
2748 const struct firmware *fw;
2749 struct adapter *adap = netdev2adap(netdev);
Hariprasad Shenaib2e1a3f2014-11-21 12:52:05 +05302750 unsigned int mbox = PCIE_FW_MASTER_M + 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002751
2752 ef->data[sizeof(ef->data) - 1] = '\0';
2753 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2754 if (ret < 0)
2755 return ret;
2756
Hariprasad Shenai22c0b962014-10-15 01:54:14 +05302757 /* If the adapter has been fully initialized then we'll go ahead and
2758 * try to get the firmware's cooperation in upgrading to the new
2759 * firmware image otherwise we'll try to do the entire job from the
2760 * host ... and we always "force" the operation in this path.
2761 */
2762 if (adap->flags & FULL_INIT_DONE)
2763 mbox = adap->mbox;
2764
2765 ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002766 release_firmware(fw);
2767 if (!ret)
Hariprasad Shenai22c0b962014-10-15 01:54:14 +05302768 dev_info(adap->pdev_dev, "loaded firmware %s,"
2769 " reload cxgb4 driver\n", ef->data);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002770 return ret;
2771}
2772
2773#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2774#define BCAST_CRC 0xa0ccc1a6
2775
2776static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2777{
2778 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2779 wol->wolopts = netdev2adap(dev)->wol;
2780 memset(&wol->sopass, 0, sizeof(wol->sopass));
2781}
2782
2783static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2784{
2785 int err = 0;
2786 struct port_info *pi = netdev_priv(dev);
2787
2788 if (wol->wolopts & ~WOL_SUPPORTED)
2789 return -EINVAL;
2790 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2791 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2792 if (wol->wolopts & WAKE_BCAST) {
2793 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2794 ~0ULL, 0, false);
2795 if (!err)
2796 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2797 ~6ULL, ~0ULL, BCAST_CRC, true);
2798 } else
2799 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2800 return err;
2801}
2802
/* netdev set_features handler: only a NETIF_F_HW_VLAN_CTAG_RX change needs
 * hardware action here.  Reprogram the VI's RX mode for VLAN tag stripping
 * and roll the feature bit back if the firmware rejects the change.
 */
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	/* -1 arguments mean "leave that RX-mode parameter unchanged". */
	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
2819
Ben Hutchings7850f632011-12-15 13:55:01 +00002820static u32 get_rss_table_size(struct net_device *dev)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002821{
2822 const struct port_info *pi = netdev_priv(dev);
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002823
Ben Hutchings7850f632011-12-15 13:55:01 +00002824 return pi->rss_size;
2825}
2826
Eyal Perry892311f2014-12-02 18:12:10 +02002827static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
Ben Hutchings7850f632011-12-15 13:55:01 +00002828{
2829 const struct port_info *pi = netdev_priv(dev);
2830 unsigned int n = pi->rss_size;
2831
Eyal Perry892311f2014-12-02 18:12:10 +02002832 if (hfunc)
2833 *hfunc = ETH_RSS_HASH_TOP;
2834 if (!p)
2835 return 0;
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002836 while (n--)
Ben Hutchings7850f632011-12-15 13:55:01 +00002837 p[n] = pi->rss[n];
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002838 return 0;
2839}
2840
Eyal Perry892311f2014-12-02 18:12:10 +02002841static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
2842 const u8 hfunc)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002843{
2844 unsigned int i;
2845 struct port_info *pi = netdev_priv(dev);
2846
Eyal Perry892311f2014-12-02 18:12:10 +02002847 /* We require at least one supported parameter to be changed and no
2848 * change in any of the unsupported parameters
2849 */
2850 if (key ||
2851 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
2852 return -EOPNOTSUPP;
2853 if (!p)
2854 return 0;
2855
Ben Hutchings7850f632011-12-15 13:55:01 +00002856 for (i = 0; i < pi->rss_size; i++)
2857 pi->rss[i] = p[i];
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002858 if (pi->adapter->flags & FULL_INIT_DONE)
2859 return write_rss(pi, pi->rss);
2860 return 0;
2861}
2862
/* ethtool get_rxnfc handler.  Supports ETHTOOL_GRXFH (which header fields
 * feed the RSS hash for each flow type, derived from the VI's firmware RSS
 * mode word) and ETHTOOL_GRXRINGS (number of RX rings on this port).
 */
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;	/* FW RSS VI config flags */

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			/* 4-tuple hashing: addresses + TCP ports */
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			/* UDP ports count only when the UDP enable is set */
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			/* Other IPv4 flows hash on addresses only */
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}
2925
/* ethtool operations table for cxgb4 network devices */
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.set_phys_id = identify_port,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_wol = set_wol,
	.get_rxnfc = get_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh = get_rss_table,
	.set_rxfh = set_rss_table,
	.flash_device = set_flash,
};
2957
/* Populate the adapter's debugfs directory (when CONFIG_DEBUG_FS is set).
 * Returns 0 on success, -1 if the debugfs root was never created.
 */
static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}
2968
2969/*
2970 * upper-layer driver support
2971 */
2972
2973/*
2974 * Allocate an active-open TID and set it to the supplied value.
2975 */
2976int cxgb4_alloc_atid(struct tid_info *t, void *data)
2977{
2978 int atid = -1;
2979
2980 spin_lock_bh(&t->atid_lock);
2981 if (t->afree) {
2982 union aopen_entry *p = t->afree;
2983
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002984 atid = (p - t->atid_tab) + t->atid_base;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002985 t->afree = p->next;
2986 p->data = data;
2987 t->atids_in_use++;
2988 }
2989 spin_unlock_bh(&t->atid_lock);
2990 return atid;
2991}
2992EXPORT_SYMBOL(cxgb4_alloc_atid);
2993
/*
 * Release an active-open TID.
 * Pushes the entry back onto the head of the free list.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
3008
/*
 * Allocate a server TID and set it to the supplied value.
 * Returns the absolute stid (table index + stid_base), or -1 on exhaustion.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		/* An IPv4 server needs a single TID. */
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		/* An IPv6 server takes an aligned region of 2^2 = 4 TIDs. */
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET)
			t->stids_in_use++;
		else
			t->stids_in_use += 4;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
3044
/* Allocate a server filter TID and set it to the supplied value.
 * Server filter TIDs live in the bitmap range [nstids, nstids + nsftids)
 * and are IPv4 only; returns the absolute sftid or -1 on exhaustion.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		/* Search only the server-filter portion of the bitmap. */
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;	/* IPv6 server filters are not supported */
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		/* Rebase the bitmap index into the sftid number space. */
		stid -= t->nstids;
		stid += t->sftid_base;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);
3072
/* Release a server TID.
 * @stid may be either a regular server TID or a server filter TID; it is
 * first translated back to its index in the shared stid bitmap/table.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		/* IPv6 servers occupy a 4-TID region (order 2) */
		bitmap_release_region(t->stid_bmap, stid, 2);
	t->stid_tab[stid].data = NULL;
	if (family == PF_INET)
		t->stids_in_use--;
	else
		t->stids_in_use -= 4;
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
3098
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 * The message is steered to the setup-priority TX queue of @chan.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
3112
3113/*
3114 * Queue a TID release request and if necessary schedule a work queue to
3115 * process it.
3116 */
stephen hemminger31b9c192010-10-18 05:39:18 +00003117static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3118 unsigned int tid)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003119{
3120 void **p = &t->tid_tab[tid];
3121 struct adapter *adap = container_of(t, struct adapter, tids);
3122
3123 spin_lock_bh(&adap->tid_release_lock);
3124 *p = adap->tid_release_head;
3125 /* Low 2 bits encode the Tx channel number */
3126 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3127 if (!adap->tid_release_task_busy) {
3128 adap->tid_release_task_busy = true;
Anish Bhatt29aaee62014-08-20 13:44:06 -07003129 queue_work(adap->workq, &adap->tid_release_task);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003130 }
3131 spin_unlock_bh(&adap->tid_release_lock);
3132}
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003133
/*
 * Process the list of pending TID release requests.
 * Runs from the adapter work queue; drops the list lock around the
 * (possibly sleeping) skb allocation and send for each entry.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		/* The channel was tagged into the pointer's low 2 bits. */
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		/* Retry the allocation until it succeeds; release must not
		 * be dropped.
		 */
		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
3165
3166/*
3167 * Release a TID and inform HW. If we are unable to allocate the release
3168 * message we defer to a work queue.
3169 */
3170void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3171{
3172 void *old;
3173 struct sk_buff *skb;
3174 struct adapter *adap = container_of(t, struct adapter, tids);
3175
3176 old = t->tid_tab[tid];
3177 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3178 if (likely(skb)) {
3179 t->tid_tab[tid] = NULL;
3180 mk_tid_release(skb, chan, tid);
3181 t4_ofld_send(adap, skb);
3182 } else
3183 cxgb4_queue_tid_release(t, chan, tid);
3184 if (old)
3185 atomic_dec(&t->tids_in_use);
3186}
3187EXPORT_SYMBOL(cxgb4_remove_tid);
3188
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 * A single allocation holds, in order: tid_tab, atid_tab, stid_tab
 * (server + server-filter entries), the stid bitmap, and ftid_tab
 * (filter + server-filter entries).
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int stid_bmap_size;
	unsigned int natids = t->natids;
	struct adapter *adap = container_of(t, struct adapter, tids);

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nftids * sizeof(*t->ftid_tab) +
	       t->nsftids * sizeof(*t->ftid_tab);

	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	/* Carve the sub-tables out of the one allocation. */
	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
	/* Reserve stid 0 for T4/T5 adapters */
	if (!t->stid_base &&
	    (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
		__set_bit(0, t->stid_bmap);

	return 0;
}
3238
Anish Bhatta3e3b282014-07-17 00:18:16 -07003239int cxgb4_clip_get(const struct net_device *dev,
3240 const struct in6_addr *lip)
Vipul Pandya01bcca62013-07-04 16:10:46 +05303241{
3242 struct adapter *adap;
3243 struct fw_clip_cmd c;
3244
3245 adap = netdev2adap(dev);
3246 memset(&c, 0, sizeof(c));
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05303247 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3248 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
Hariprasad Shenaib2e1a3f2014-11-21 12:52:05 +05303249 c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
Joe Perches12f2a472014-03-24 10:45:12 -07003250 c.ip_hi = *(__be64 *)(lip->s6_addr);
3251 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
Vipul Pandya01bcca62013-07-04 16:10:46 +05303252 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3253}
Anish Bhatta3e3b282014-07-17 00:18:16 -07003254EXPORT_SYMBOL(cxgb4_clip_get);
Vipul Pandya01bcca62013-07-04 16:10:46 +05303255
Anish Bhatta3e3b282014-07-17 00:18:16 -07003256int cxgb4_clip_release(const struct net_device *dev,
3257 const struct in6_addr *lip)
Vipul Pandya01bcca62013-07-04 16:10:46 +05303258{
3259 struct adapter *adap;
3260 struct fw_clip_cmd c;
3261
3262 adap = netdev2adap(dev);
3263 memset(&c, 0, sizeof(c));
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05303264 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3265 FW_CMD_REQUEST_F | FW_CMD_READ_F);
Hariprasad Shenaib2e1a3f2014-11-21 12:52:05 +05303266 c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
Joe Perches12f2a472014-03-24 10:45:12 -07003267 c.ip_hi = *(__be64 *)(lip->s6_addr);
3268 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
Vipul Pandya01bcca62013-07-04 16:10:46 +05303269 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3270}
Anish Bhatta3e3b282014-07-17 00:18:16 -07003271EXPORT_SYMBOL(cxgb4_clip_release);
Vipul Pandya01bcca62013-07-04 16:10:46 +05303272
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: VLAN tag (currently unused by this routine)
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);	/* wildcard peer */
	req->local_ip = sip;
	req->peer_ip = htonl(0);	/* wildcard peer */
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	/* Ask mode: pass connection requests up; steer via RSS to @queue */
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
3314
/* cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);	/* wildcard peer */
	/* 128-bit address carried as two 64-bit halves */
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	/* Ask mode: pass connection requests up; steer via RSS to @queue */
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
3357
3358int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3359 unsigned int queue, bool ipv6)
3360{
3361 struct sk_buff *skb;
3362 struct adapter *adap;
3363 struct cpl_close_listsvr_req *req;
3364 int ret;
3365
3366 adap = netdev2adap(dev);
3367
3368 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3369 if (!skb)
3370 return -ENOMEM;
3371
3372 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3373 INIT_TP_WR(req, 0);
3374 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
Hariprasad Shenaibdc590b2015-01-08 21:38:16 -08003375 req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
3376 LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
Vipul Pandya80f40c12013-07-04 16:10:45 +05303377 ret = t4_mgmt_tx(adap, skb);
3378 return net_xmit_eval(ret);
3379}
3380EXPORT_SYMBOL(cxgb4_remove_server);
3381
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003382/**
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003383 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3384 * @mtus: the HW MTU table
3385 * @mtu: the target MTU
3386 * @idx: index of selected entry in the MTU table
3387 *
3388 * Returns the index and the value in the HW MTU table that is closest to
3389 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3390 * table, in which case that smallest available value is selected.
3391 */
3392unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3393 unsigned int *idx)
3394{
3395 unsigned int i = 0;
3396
3397 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3398 ++i;
3399 if (idx)
3400 *idx = i;
3401 return mtus[i];
3402}
3403EXPORT_SYMBOL(cxgb4_best_mtu);
3404
3405/**
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05303406 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3407 * @mtus: the HW MTU table
3408 * @header_size: Header Size
3409 * @data_size_max: maximum Data Segment Size
3410 * @data_size_align: desired Data Segment Size Alignment (2^N)
3411 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3412 *
3413 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3414 * MTU Table based solely on a Maximum MTU parameter, we break that
3415 * parameter up into a Header Size and Maximum Data Segment Size, and
3416 * provide a desired Data Segment Size Alignment. If we find an MTU in
3417 * the Hardware MTU Table which will result in a Data Segment Size with
3418 * the requested alignment _and_ that MTU isn't "too far" from the
3419 * closest MTU, then we'll return that rather than the closest MTU.
3420 */
3421unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3422 unsigned short header_size,
3423 unsigned short data_size_max,
3424 unsigned short data_size_align,
3425 unsigned int *mtu_idxp)
3426{
3427 unsigned short max_mtu = header_size + data_size_max;
3428 unsigned short data_size_align_mask = data_size_align - 1;
3429 int mtu_idx, aligned_mtu_idx;
3430
3431 /* Scan the MTU Table till we find an MTU which is larger than our
3432 * Maximum MTU or we reach the end of the table. Along the way,
3433 * record the last MTU found, if any, which will result in a Data
3434 * Segment Length matching the requested alignment.
3435 */
3436 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3437 unsigned short data_size = mtus[mtu_idx] - header_size;
3438
3439 /* If this MTU minus the Header Size would result in a
3440 * Data Segment Size of the desired alignment, remember it.
3441 */
3442 if ((data_size & data_size_align_mask) == 0)
3443 aligned_mtu_idx = mtu_idx;
3444
3445 /* If we're not at the end of the Hardware MTU Table and the
3446 * next element is larger than our Maximum MTU, drop out of
3447 * the loop.
3448 */
3449 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3450 break;
3451 }
3452
3453 /* If we fell out of the loop because we ran to the end of the table,
3454 * then we just have to use the last [largest] entry.
3455 */
3456 if (mtu_idx == NMTUS)
3457 mtu_idx--;
3458
3459 /* If we found an MTU which resulted in the requested Data Segment
3460 * Length alignment and that's "not far" from the largest MTU which is
3461 * less than or equal to the maximum MTU, then use that.
3462 */
3463 if (aligned_mtu_idx >= 0 &&
3464 mtu_idx - aligned_mtu_idx <= 1)
3465 mtu_idx = aligned_mtu_idx;
3466
3467 /* If the caller has passed in an MTU Index pointer, pass the
3468 * MTU Index back. Return the MTU value.
3469 */
3470 if (mtu_idxp)
3471 *mtu_idxp = mtu_idx;
3472 return mtus[mtu_idx];
3473}
3474EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
3475
3476/**
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003477 * cxgb4_port_chan - get the HW channel of a port
3478 * @dev: the net device for the port
3479 *
3480 * Return the HW Tx channel of the given port.
3481 */
3482unsigned int cxgb4_port_chan(const struct net_device *dev)
3483{
3484 return netdev2pinfo(dev)->tx_chan;
3485}
3486EXPORT_SYMBOL(cxgb4_port_chan);
3487
Vipul Pandya881806b2012-05-18 15:29:24 +05303488unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3489{
3490 struct adapter *adap = netdev2adap(dev);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003491 u32 v1, v2, lp_count, hp_count;
Vipul Pandya881806b2012-05-18 15:29:24 +05303492
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303493 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
3494 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303495 if (is_t4(adap->params.chip)) {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303496 lp_count = LP_COUNT_G(v1);
3497 hp_count = HP_COUNT_G(v1);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003498 } else {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303499 lp_count = LP_COUNT_T5_G(v1);
3500 hp_count = HP_COUNT_T5_G(v2);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003501 }
3502 return lpfifo ? lp_count : hp_count;
Vipul Pandya881806b2012-05-18 15:29:24 +05303503}
3504EXPORT_SYMBOL(cxgb4_dbfifo_count);
3505
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003506/**
3507 * cxgb4_port_viid - get the VI id of a port
3508 * @dev: the net device for the port
3509 *
3510 * Return the VI id of the given port.
3511 */
3512unsigned int cxgb4_port_viid(const struct net_device *dev)
3513{
3514 return netdev2pinfo(dev)->viid;
3515}
3516EXPORT_SYMBOL(cxgb4_port_viid);
3517
3518/**
3519 * cxgb4_port_idx - get the index of a port
3520 * @dev: the net device for the port
3521 *
3522 * Return the index of the given port.
3523 */
3524unsigned int cxgb4_port_idx(const struct net_device *dev)
3525{
3526 return netdev2pinfo(dev)->port_id;
3527}
3528EXPORT_SYMBOL(cxgb4_port_idx);
3529
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003530void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3531 struct tp_tcp_stats *v6)
3532{
3533 struct adapter *adap = pci_get_drvdata(pdev);
3534
3535 spin_lock(&adap->stats_lock);
3536 t4_tp_get_tcp_stats(adap, v4, v6);
3537 spin_unlock(&adap->stats_lock);
3538}
3539EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3540
3541void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3542 const unsigned int *pgsz_order)
3543{
3544 struct adapter *adap = netdev2adap(dev);
3545
Hariprasad Shenai0d804332015-01-05 16:30:47 +05303546 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
3547 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
3548 HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
3549 HPZ3_V(pgsz_order[3]));
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003550}
3551EXPORT_SYMBOL(cxgb4_iscsi_init);
3552
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303553int cxgb4_flush_eq_cache(struct net_device *dev)
3554{
3555 struct adapter *adap = netdev2adap(dev);
3556 int ret;
3557
3558 ret = t4_fwaddrspace_write(adap, adap->mbox,
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303559 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303560 return ret;
3561}
3562EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3563
3564static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3565{
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303566 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303567 __be64 indices;
3568 int ret;
3569
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05303570 spin_lock(&adap->win0_lock);
3571 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3572 sizeof(indices), (__be32 *)&indices,
3573 T4_MEMORY_READ);
3574 spin_unlock(&adap->win0_lock);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303575 if (!ret) {
Vipul Pandya404d9e32012-10-08 02:59:43 +00003576 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3577 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303578 }
3579 return ret;
3580}
3581
3582int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3583 u16 size)
3584{
3585 struct adapter *adap = netdev2adap(dev);
3586 u16 hw_pidx, hw_cidx;
3587 int ret;
3588
3589 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3590 if (ret)
3591 goto out;
3592
3593 if (pidx != hw_pidx) {
3594 u16 delta;
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303595 u32 val;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303596
3597 if (pidx >= hw_pidx)
3598 delta = pidx - hw_pidx;
3599 else
3600 delta = size - hw_pidx + pidx;
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303601
3602 if (is_t4(adap->params.chip))
3603 val = PIDX_V(delta);
3604 else
3605 val = PIDX_T5_V(delta);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303606 wmb();
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303607 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
3608 QID_V(qid) | val);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303609 }
3610out:
3611 return ret;
3612}
3613EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3614
Vipul Pandya3cbdb922013-03-14 05:08:59 +00003615void cxgb4_disable_db_coalescing(struct net_device *dev)
3616{
3617 struct adapter *adap;
3618
3619 adap = netdev2adap(dev);
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303620 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303621 NOCOALESCE_F);
Vipul Pandya3cbdb922013-03-14 05:08:59 +00003622}
3623EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3624
3625void cxgb4_enable_db_coalescing(struct net_device *dev)
3626{
3627 struct adapter *adap;
3628
3629 adap = netdev2adap(dev);
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303630 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
Vipul Pandya3cbdb922013-03-14 05:08:59 +00003631}
3632EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3633
Hariprasad Shenai031cf472014-07-14 21:34:53 +05303634int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3635{
3636 struct adapter *adap;
3637 u32 offset, memtype, memaddr;
Hariprasad Shenai6559a7e2014-11-07 09:35:24 +05303638 u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
Hariprasad Shenai031cf472014-07-14 21:34:53 +05303639 u32 edc0_end, edc1_end, mc0_end, mc1_end;
3640 int ret;
3641
3642 adap = netdev2adap(dev);
3643
3644 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
3645
3646 /* Figure out where the offset lands in the Memory Type/Address scheme.
3647 * This code assumes that the memory is laid out starting at offset 0
3648 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3649 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
3650 * MC0, and some have both MC0 and MC1.
3651 */
Hariprasad Shenai6559a7e2014-11-07 09:35:24 +05303652 size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
3653 edc0_size = EDRAM0_SIZE_G(size) << 20;
3654 size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
3655 edc1_size = EDRAM1_SIZE_G(size) << 20;
3656 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
3657 mc0_size = EXT_MEM0_SIZE_G(size) << 20;
Hariprasad Shenai031cf472014-07-14 21:34:53 +05303658
3659 edc0_end = edc0_size;
3660 edc1_end = edc0_end + edc1_size;
3661 mc0_end = edc1_end + mc0_size;
3662
3663 if (offset < edc0_end) {
3664 memtype = MEM_EDC0;
3665 memaddr = offset;
3666 } else if (offset < edc1_end) {
3667 memtype = MEM_EDC1;
3668 memaddr = offset - edc0_end;
3669 } else {
3670 if (offset < mc0_end) {
3671 memtype = MEM_MC0;
3672 memaddr = offset - edc1_end;
3673 } else if (is_t4(adap->params.chip)) {
3674 /* T4 only has a single memory channel */
3675 goto err;
3676 } else {
Hariprasad Shenai6559a7e2014-11-07 09:35:24 +05303677 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
3678 mc1_size = EXT_MEM1_SIZE_G(size) << 20;
Hariprasad Shenai031cf472014-07-14 21:34:53 +05303679 mc1_end = mc0_end + mc1_size;
3680 if (offset < mc1_end) {
3681 memtype = MEM_MC1;
3682 memaddr = offset - mc0_end;
3683 } else {
3684 /* offset beyond the end of any memory */
3685 goto err;
3686 }
3687 }
3688 }
3689
3690 spin_lock(&adap->win0_lock);
3691 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3692 spin_unlock(&adap->win0_lock);
3693 return ret;
3694
3695err:
3696 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3697 stag, offset);
3698 return -EINVAL;
3699}
3700EXPORT_SYMBOL(cxgb4_read_tpte);
3701
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +05303702u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3703{
3704 u32 hi, lo;
3705 struct adapter *adap;
3706
3707 adap = netdev2adap(dev);
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303708 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
3709 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +05303710
3711 return ((u64)hi << 32) | (u64)lo;
3712}
3713EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3714
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05303715int cxgb4_bar2_sge_qregs(struct net_device *dev,
3716 unsigned int qid,
3717 enum cxgb4_bar2_qtype qtype,
3718 u64 *pbar2_qoffset,
3719 unsigned int *pbar2_qid)
3720{
Stephen Rothwelldd0bcc02014-12-10 19:48:02 +11003721 return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05303722 qid,
3723 (qtype == CXGB4_BAR2_QTYPE_EGRESS
3724 ? T4_BAR2_QTYPE_EGRESS
3725 : T4_BAR2_QTYPE_INGRESS),
3726 pbar2_qoffset,
3727 pbar2_qid);
3728}
3729EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
3730
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003731static struct pci_driver cxgb4_driver;
3732
3733static void check_neigh_update(struct neighbour *neigh)
3734{
3735 const struct device *parent;
3736 const struct net_device *netdev = neigh->dev;
3737
3738 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3739 netdev = vlan_dev_real_dev(netdev);
3740 parent = netdev->dev.parent;
3741 if (parent && parent->driver == &cxgb4_driver.driver)
3742 t4_l2t_update(dev_get_drvdata(parent), neigh);
3743}
3744
3745static int netevent_cb(struct notifier_block *nb, unsigned long event,
3746 void *data)
3747{
3748 switch (event) {
3749 case NETEVENT_NEIGH_UPDATE:
3750 check_neigh_update(data);
3751 break;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003752 case NETEVENT_REDIRECT:
3753 default:
3754 break;
3755 }
3756 return 0;
3757}
3758
3759static bool netevent_registered;
3760static struct notifier_block cxgb4_netevent_nb = {
3761 .notifier_call = netevent_cb
3762};
3763
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303764static void drain_db_fifo(struct adapter *adap, int usecs)
3765{
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003766 u32 v1, v2, lp_count, hp_count;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303767
3768 do {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303769 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
3770 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303771 if (is_t4(adap->params.chip)) {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303772 lp_count = LP_COUNT_G(v1);
3773 hp_count = HP_COUNT_G(v1);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003774 } else {
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303775 lp_count = LP_COUNT_T5_G(v1);
3776 hp_count = HP_COUNT_T5_G(v2);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003777 }
3778
3779 if (lp_count == 0 && hp_count == 0)
3780 break;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303781 set_current_state(TASK_UNINTERRUPTIBLE);
3782 schedule_timeout(usecs_to_jiffies(usecs));
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303783 } while (1);
3784}
3785
3786static void disable_txq_db(struct sge_txq *q)
3787{
Steve Wise05eb2382014-03-14 21:52:08 +05303788 unsigned long flags;
3789
3790 spin_lock_irqsave(&q->db_lock, flags);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303791 q->db_disabled = 1;
Steve Wise05eb2382014-03-14 21:52:08 +05303792 spin_unlock_irqrestore(&q->db_lock, flags);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303793}
3794
Steve Wise05eb2382014-03-14 21:52:08 +05303795static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303796{
3797 spin_lock_irq(&q->db_lock);
Steve Wise05eb2382014-03-14 21:52:08 +05303798 if (q->db_pidx_inc) {
3799 /* Make sure that all writes to the TX descriptors
3800 * are committed before we tell HW about them.
3801 */
3802 wmb();
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303803 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
3804 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
Steve Wise05eb2382014-03-14 21:52:08 +05303805 q->db_pidx_inc = 0;
3806 }
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303807 q->db_disabled = 0;
3808 spin_unlock_irq(&q->db_lock);
3809}
3810
3811static void disable_dbs(struct adapter *adap)
3812{
3813 int i;
3814
3815 for_each_ethrxq(&adap->sge, i)
3816 disable_txq_db(&adap->sge.ethtxq[i].q);
3817 for_each_ofldrxq(&adap->sge, i)
3818 disable_txq_db(&adap->sge.ofldtxq[i].q);
3819 for_each_port(adap, i)
3820 disable_txq_db(&adap->sge.ctrlq[i].q);
3821}
3822
3823static void enable_dbs(struct adapter *adap)
3824{
3825 int i;
3826
3827 for_each_ethrxq(&adap->sge, i)
Steve Wise05eb2382014-03-14 21:52:08 +05303828 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303829 for_each_ofldrxq(&adap->sge, i)
Steve Wise05eb2382014-03-14 21:52:08 +05303830 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303831 for_each_port(adap, i)
Steve Wise05eb2382014-03-14 21:52:08 +05303832 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
3833}
3834
3835static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3836{
3837 if (adap->uld_handle[CXGB4_ULD_RDMA])
3838 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3839 cmd);
3840}
3841
3842static void process_db_full(struct work_struct *work)
3843{
3844 struct adapter *adap;
3845
3846 adap = container_of(work, struct adapter, db_full_task);
3847
3848 drain_db_fifo(adap, dbfifo_drain_delay);
3849 enable_dbs(adap);
3850 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303851 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
3852 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
3853 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303854}
3855
3856static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3857{
3858 u16 hw_pidx, hw_cidx;
3859 int ret;
3860
Steve Wise05eb2382014-03-14 21:52:08 +05303861 spin_lock_irq(&q->db_lock);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303862 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3863 if (ret)
3864 goto out;
3865 if (q->db_pidx != hw_pidx) {
3866 u16 delta;
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303867 u32 val;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303868
3869 if (q->db_pidx >= hw_pidx)
3870 delta = q->db_pidx - hw_pidx;
3871 else
3872 delta = q->size - hw_pidx + q->db_pidx;
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303873
3874 if (is_t4(adap->params.chip))
3875 val = PIDX_V(delta);
3876 else
3877 val = PIDX_T5_V(delta);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303878 wmb();
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303879 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
3880 QID_V(q->cntxt_id) | val);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303881 }
3882out:
3883 q->db_disabled = 0;
Steve Wise05eb2382014-03-14 21:52:08 +05303884 q->db_pidx_inc = 0;
3885 spin_unlock_irq(&q->db_lock);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303886 if (ret)
3887 CH_WARN(adap, "DB drop recovery failed.\n");
3888}
3889static void recover_all_queues(struct adapter *adap)
3890{
3891 int i;
3892
3893 for_each_ethrxq(&adap->sge, i)
3894 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3895 for_each_ofldrxq(&adap->sge, i)
3896 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3897 for_each_port(adap, i)
3898 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3899}
3900
Vipul Pandya881806b2012-05-18 15:29:24 +05303901static void process_db_drop(struct work_struct *work)
3902{
3903 struct adapter *adap;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303904
Vipul Pandya881806b2012-05-18 15:29:24 +05303905 adap = container_of(work, struct adapter, db_drop_task);
3906
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303907 if (is_t4(adap->params.chip)) {
Steve Wise05eb2382014-03-14 21:52:08 +05303908 drain_db_fifo(adap, dbfifo_drain_delay);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003909 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
Steve Wise05eb2382014-03-14 21:52:08 +05303910 drain_db_fifo(adap, dbfifo_drain_delay);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003911 recover_all_queues(adap);
Steve Wise05eb2382014-03-14 21:52:08 +05303912 drain_db_fifo(adap, dbfifo_drain_delay);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003913 enable_dbs(adap);
Steve Wise05eb2382014-03-14 21:52:08 +05303914 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003915 } else {
3916 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3917 u16 qid = (dropped_db >> 15) & 0x1ffff;
3918 u16 pidx_inc = dropped_db & 0x1fff;
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05303919 u64 bar2_qoffset;
3920 unsigned int bar2_qid;
3921 int ret;
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003922
Stephen Rothwelldd0bcc02014-12-10 19:48:02 +11003923 ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05303924 &bar2_qoffset, &bar2_qid);
3925 if (ret)
3926 dev_err(adap->pdev_dev, "doorbell drop recovery: "
3927 "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
3928 else
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303929 writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05303930 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003931
3932 /* Re-enable BAR2 WC */
3933 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3934 }
3935
Hariprasad Shenaif061de422015-01-05 16:30:44 +05303936 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
Vipul Pandya881806b2012-05-18 15:29:24 +05303937}
3938
3939void t4_db_full(struct adapter *adap)
3940{
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303941 if (is_t4(adap->params.chip)) {
Steve Wise05eb2382014-03-14 21:52:08 +05303942 disable_dbs(adap);
3943 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303944 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
3945 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
Anish Bhatt29aaee62014-08-20 13:44:06 -07003946 queue_work(adap->workq, &adap->db_full_task);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003947 }
Vipul Pandya881806b2012-05-18 15:29:24 +05303948}
3949
3950void t4_db_dropped(struct adapter *adap)
3951{
Steve Wise05eb2382014-03-14 21:52:08 +05303952 if (is_t4(adap->params.chip)) {
3953 disable_dbs(adap);
3954 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3955 }
Anish Bhatt29aaee62014-08-20 13:44:06 -07003956 queue_work(adap->workq, &adap->db_drop_task);
Vipul Pandya881806b2012-05-18 15:29:24 +05303957}
3958
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003959static void uld_attach(struct adapter *adap, unsigned int uld)
3960{
3961 void *handle;
3962 struct cxgb4_lld_info lli;
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003963 unsigned short i;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003964
3965 lli.pdev = adap->pdev;
Hariprasad Shenai35b1de52014-06-27 19:23:47 +05303966 lli.pf = adap->fn;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003967 lli.l2t = adap->l2t;
3968 lli.tids = &adap->tids;
3969 lli.ports = adap->port;
3970 lli.vr = &adap->vres;
3971 lli.mtus = adap->params.mtus;
3972 if (uld == CXGB4_ULD_RDMA) {
3973 lli.rxq_ids = adap->sge.rdma_rxq;
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05303974 lli.ciq_ids = adap->sge.rdma_ciq;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003975 lli.nrxq = adap->sge.rdmaqs;
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05303976 lli.nciq = adap->sge.rdmaciqs;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003977 } else if (uld == CXGB4_ULD_ISCSI) {
3978 lli.rxq_ids = adap->sge.ofld_rxq;
3979 lli.nrxq = adap->sge.ofldqsets;
3980 }
3981 lli.ntxq = adap->sge.ofldqsets;
3982 lli.nchan = adap->params.nports;
3983 lli.nports = adap->params.nports;
3984 lli.wr_cred = adap->params.ofldq_wr_cred;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303985 lli.adapter_type = adap->params.chip;
Hariprasad Shenai837e4a42015-01-05 16:30:46 +05303986 lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +05303987 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05303988 lli.udb_density = 1 << adap->params.sge.eq_qpp;
3989 lli.ucq_density = 1 << adap->params.sge.iq_qpp;
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05303990 lli.filt_mode = adap->params.tp.vlan_pri_map;
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003991 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3992 for (i = 0; i < NCHAN; i++)
3993 lli.tx_modq[i] = i;
Hariprasad Shenaif612b812015-01-05 16:30:43 +05303994 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
3995 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003996 lli.fw_vers = adap->params.fw_vers;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303997 lli.dbfifo_int_thresh = dbfifo_int_thresh;
Hariprasad Shenai04e10e22014-07-14 21:34:51 +05303998 lli.sge_ingpadboundary = adap->sge.fl_align;
3999 lli.sge_egrstatuspagesize = adap->sge.stat_len;
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004000 lli.sge_pktshift = adap->sge.pktshift;
4001 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05304002 lli.max_ordird_qp = adap->params.max_ordird_qp;
4003 lli.max_ird_adapter = adap->params.max_ird_adapter;
Kumar Sanghvi1ac0f092014-02-18 17:56:12 +05304004 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004005
4006 handle = ulds[uld].add(&lli);
4007 if (IS_ERR(handle)) {
4008 dev_warn(adap->pdev_dev,
4009 "could not attach to the %s driver, error %ld\n",
4010 uld_str[uld], PTR_ERR(handle));
4011 return;
4012 }
4013
4014 adap->uld_handle[uld] = handle;
4015
4016 if (!netevent_registered) {
4017 register_netevent_notifier(&cxgb4_netevent_nb);
4018 netevent_registered = true;
4019 }
Dimitris Michailidise29f5db2010-05-18 10:07:13 +00004020
4021 if (adap->flags & FULL_INIT_DONE)
4022 ulds[uld].state_change(handle, CXGB4_STATE_UP);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004023}
4024
4025static void attach_ulds(struct adapter *adap)
4026{
4027 unsigned int i;
4028
Vipul Pandya01bcca62013-07-04 16:10:46 +05304029 spin_lock(&adap_rcu_lock);
4030 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
4031 spin_unlock(&adap_rcu_lock);
4032
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004033 mutex_lock(&uld_mutex);
4034 list_add_tail(&adap->list_node, &adapter_list);
4035 for (i = 0; i < CXGB4_ULD_MAX; i++)
4036 if (ulds[i].add)
4037 uld_attach(adap, i);
4038 mutex_unlock(&uld_mutex);
4039}
4040
4041static void detach_ulds(struct adapter *adap)
4042{
4043 unsigned int i;
4044
4045 mutex_lock(&uld_mutex);
4046 list_del(&adap->list_node);
4047 for (i = 0; i < CXGB4_ULD_MAX; i++)
4048 if (adap->uld_handle[i]) {
4049 ulds[i].state_change(adap->uld_handle[i],
4050 CXGB4_STATE_DETACH);
4051 adap->uld_handle[i] = NULL;
4052 }
4053 if (netevent_registered && list_empty(&adapter_list)) {
4054 unregister_netevent_notifier(&cxgb4_netevent_nb);
4055 netevent_registered = false;
4056 }
4057 mutex_unlock(&uld_mutex);
Vipul Pandya01bcca62013-07-04 16:10:46 +05304058
4059 spin_lock(&adap_rcu_lock);
4060 list_del_rcu(&adap->rcu_node);
4061 spin_unlock(&adap_rcu_lock);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004062}
4063
4064static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4065{
4066 unsigned int i;
4067
4068 mutex_lock(&uld_mutex);
4069 for (i = 0; i < CXGB4_ULD_MAX; i++)
4070 if (adap->uld_handle[i])
4071 ulds[i].state_change(adap->uld_handle[i], new_state);
4072 mutex_unlock(&uld_mutex);
4073}
4074
4075/**
4076 * cxgb4_register_uld - register an upper-layer driver
4077 * @type: the ULD type
4078 * @p: the ULD methods
4079 *
4080 * Registers an upper-layer driver with this driver and notifies the ULD
4081 * about any presently available devices that support its type. Returns
4082 * %-EBUSY if a ULD of the same type is already registered.
4083 */
4084int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4085{
4086 int ret = 0;
4087 struct adapter *adap;
4088
4089 if (type >= CXGB4_ULD_MAX)
4090 return -EINVAL;
4091 mutex_lock(&uld_mutex);
4092 if (ulds[type].add) {
4093 ret = -EBUSY;
4094 goto out;
4095 }
4096 ulds[type] = *p;
4097 list_for_each_entry(adap, &adapter_list, list_node)
4098 uld_attach(adap, type);
4099out: mutex_unlock(&uld_mutex);
4100 return ret;
4101}
4102EXPORT_SYMBOL(cxgb4_register_uld);
4103
4104/**
4105 * cxgb4_unregister_uld - unregister an upper-layer driver
4106 * @type: the ULD type
4107 *
4108 * Unregisters an existing upper-layer driver.
4109 */
4110int cxgb4_unregister_uld(enum cxgb4_uld type)
4111{
4112 struct adapter *adap;
4113
4114 if (type >= CXGB4_ULD_MAX)
4115 return -EINVAL;
4116 mutex_lock(&uld_mutex);
4117 list_for_each_entry(adap, &adapter_list, list_node)
4118 adap->uld_handle[type] = NULL;
4119 ulds[type].add = NULL;
4120 mutex_unlock(&uld_mutex);
4121 return 0;
4122}
4123EXPORT_SYMBOL(cxgb4_unregister_uld);
4124
Vipul Pandya01bcca62013-07-04 16:10:46 +05304125/* Check if netdev on which event is occured belongs to us or not. Return
Li RongQingee9a33b2014-06-20 17:32:36 +08004126 * success (true) if it belongs otherwise failure (false).
4127 * Called with rcu_read_lock() held.
Vipul Pandya01bcca62013-07-04 16:10:46 +05304128 */
Anish Bhatt1bb60372014-10-14 20:07:22 -07004129#if IS_ENABLED(CONFIG_IPV6)
Li RongQingee9a33b2014-06-20 17:32:36 +08004130static bool cxgb4_netdev(const struct net_device *netdev)
Vipul Pandya01bcca62013-07-04 16:10:46 +05304131{
4132 struct adapter *adap;
4133 int i;
4134
Vipul Pandya01bcca62013-07-04 16:10:46 +05304135 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4136 for (i = 0; i < MAX_NPORTS; i++)
Li RongQingee9a33b2014-06-20 17:32:36 +08004137 if (adap->port[i] == netdev)
4138 return true;
4139 return false;
Vipul Pandya01bcca62013-07-04 16:10:46 +05304140}
4141
4142static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4143 unsigned long event)
4144{
4145 int ret = NOTIFY_DONE;
4146
4147 rcu_read_lock();
4148 if (cxgb4_netdev(event_dev)) {
4149 switch (event) {
4150 case NETDEV_UP:
Joe Perches44835892014-11-06 20:46:14 -08004151 ret = cxgb4_clip_get(event_dev, &ifa->addr);
Vipul Pandya01bcca62013-07-04 16:10:46 +05304152 if (ret < 0) {
4153 rcu_read_unlock();
4154 return ret;
4155 }
4156 ret = NOTIFY_OK;
4157 break;
4158 case NETDEV_DOWN:
Joe Perches44835892014-11-06 20:46:14 -08004159 cxgb4_clip_release(event_dev, &ifa->addr);
Vipul Pandya01bcca62013-07-04 16:10:46 +05304160 ret = NOTIFY_OK;
4161 break;
4162 default:
4163 break;
4164 }
4165 }
4166 rcu_read_unlock();
4167 return ret;
4168}
4169
4170static int cxgb4_inet6addr_handler(struct notifier_block *this,
4171 unsigned long event, void *data)
4172{
4173 struct inet6_ifaddr *ifa = data;
4174 struct net_device *event_dev;
4175 int ret = NOTIFY_DONE;
Vipul Pandya01bcca62013-07-04 16:10:46 +05304176 struct bonding *bond = netdev_priv(ifa->idev->dev);
Veaceslav Falico9caff1e72013-09-25 09:20:14 +02004177 struct list_head *iter;
Vipul Pandya01bcca62013-07-04 16:10:46 +05304178 struct slave *slave;
4179 struct pci_dev *first_pdev = NULL;
4180
4181 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4182 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4183 ret = clip_add(event_dev, ifa, event);
4184 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4185 /* It is possible that two different adapters are bonded in one
4186 * bond. We need to find such different adapters and add clip
4187 * in all of them only once.
4188 */
Veaceslav Falico9caff1e72013-09-25 09:20:14 +02004189 bond_for_each_slave(bond, slave, iter) {
Vipul Pandya01bcca62013-07-04 16:10:46 +05304190 if (!first_pdev) {
4191 ret = clip_add(slave->dev, ifa, event);
4192 /* If clip_add is success then only initialize
4193 * first_pdev since it means it is our device
4194 */
4195 if (ret == NOTIFY_OK)
4196 first_pdev = to_pci_dev(
4197 slave->dev->dev.parent);
4198 } else if (first_pdev !=
4199 to_pci_dev(slave->dev->dev.parent))
4200 ret = clip_add(slave->dev, ifa, event);
4201 }
Vipul Pandya01bcca62013-07-04 16:10:46 +05304202 } else
4203 ret = clip_add(ifa->idev->dev, ifa, event);
4204
4205 return ret;
4206}
4207
4208static struct notifier_block cxgb4_inet6addr_notifier = {
4209 .notifier_call = cxgb4_inet6addr_handler
4210};
4211
4212/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4213 * a physical device.
4214 * The physical device reference is needed to send the actul CLIP command.
4215 */
4216static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4217{
4218 struct inet6_dev *idev = NULL;
4219 struct inet6_ifaddr *ifa;
4220 int ret = 0;
4221
4222 idev = __in6_dev_get(root_dev);
4223 if (!idev)
4224 return ret;
4225
4226 read_lock_bh(&idev->lock);
4227 list_for_each_entry(ifa, &idev->addr_list, if_list) {
Joe Perches44835892014-11-06 20:46:14 -08004228 ret = cxgb4_clip_get(dev, &ifa->addr);
Vipul Pandya01bcca62013-07-04 16:10:46 +05304229 if (ret < 0)
4230 break;
4231 }
4232 read_unlock_bh(&idev->lock);
4233
4234 return ret;
4235}
4236
/* Push the IPv6 addresses of a physical port and of every bond/vlan
 * device stacked on top of it into the hardware CLIP table.
 * Caller holds the RCU read lock (required by the *_rcu lookups below).
 * Returns 0 on success or the first failing update's negative errno.
 */
static int update_root_dev_clip(struct net_device *dev)
{
	struct net_device *root_dev = NULL;
	int i, ret = 0;

	/* First populate the real net device's IPv6 addresses */
	ret = update_dev_clip(dev, dev);
	if (ret)
		return ret;

	/* Parse all bond and vlan devices layered on top of the physical dev */
	root_dev = netdev_master_upper_dev_get_rcu(dev);
	if (root_dev) {
		ret = update_dev_clip(root_dev, dev);
		if (ret)
			return ret;
	}

	/* Scan every possible VLAN id for a vlan device over this port */
	for (i = 0; i < VLAN_N_VID; i++) {
		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
		if (!root_dev)
			continue;

		ret = update_dev_clip(root_dev, dev);
		if (ret)
			break;
	}
	return ret;
}
4266
4267static void update_clip(const struct adapter *adap)
4268{
4269 int i;
4270 struct net_device *dev;
4271 int ret;
4272
4273 rcu_read_lock();
4274
4275 for (i = 0; i < MAX_NPORTS; i++) {
4276 dev = adap->port[i];
4277 ret = 0;
4278
4279 if (dev)
4280 ret = update_root_dev_clip(dev);
4281
4282 if (ret < 0)
4283 break;
4284 }
4285 rcu_read_unlock();
4286}
Anish Bhatt1bb60372014-10-14 20:07:22 -07004287#endif /* IS_ENABLED(CONFIG_IPV6) */
Vipul Pandya01bcca62013-07-04 16:10:46 +05304288
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 *
 *	Returns 0 on success or a negative errno; on failure all SGE
 *	resources allocated here are freed again via the goto cleanup path.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	/* Bring up the SGE queues and RSS before any interrupt can fire */
	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		/* Vector 0 handles non-data (firmware/error) interrupts;
		 * the remaining vectors are per-queue.
		 */
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		/* Single shared interrupt for INTx/MSI operation */
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	/* Tell the upper-layer drivers (iSCSI/RDMA) that we are up */
	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}
4345
/* Disable the adapter: the inverse of cxgb_up().  Disables interrupts,
 * flushes deferred work, releases the IRQs and frees all SGE resources.
 * Ordering matters: interrupts are masked before work items are cancelled
 * and queues are quiesced before their resources are freed.
 */
static void cxgb_down(struct adapter *adapter)
{
	t4_intr_disable(adapter);
	/* Make sure no deferred work is still running or queued */
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	if (adapter->flags & USING_MSIX) {
		free_msix_queue_irqs(adapter);
		free_irq(adapter->msix_info[0].vec, adapter);
	} else
		free_irq(adapter->pdev->irq, adapter);
	quiesce_rx(adapter);
	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}
4365
/*
 * net_device operations
 */

/* ndo_open handler: bring the adapter fully up on first use, then start
 * the link and the TX queues for this port.  Returns 0 or negative errno.
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	/* Report no-link until link_start() succeeds */
	netif_carrier_off(dev);

	/* First port opened performs the one-time adapter bring-up */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}
4388
4389static int cxgb_close(struct net_device *dev)
4390{
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004391 struct port_info *pi = netdev_priv(dev);
4392 struct adapter *adapter = pi->adapter;
4393
4394 netif_tx_stop_all_queues(dev);
4395 netif_carrier_off(dev);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004396 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004397}
4398
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00004399/* Return an error number if the indicated filter isn't writable ...
4400 */
4401static int writable_filter(struct filter_entry *f)
4402{
4403 if (f->locked)
4404 return -EPERM;
4405 if (f->pending)
4406 return -EBUSY;
4407
4408 return 0;
4409}
4410
4411/* Delete the filter at the specified index (if valid). The checks for all
4412 * the common problems with doing this like the filter being locked, currently
4413 * pending in another operation, etc.
4414 */
4415static int delete_filter(struct adapter *adapter, unsigned int fidx)
4416{
4417 struct filter_entry *f;
4418 int ret;
4419
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004420 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00004421 return -EINVAL;
4422
4423 f = &adapter->tids.ftid_tab[fidx];
4424 ret = writable_filter(f);
4425 if (ret)
4426 return ret;
4427 if (f->valid)
4428 return del_filter_wr(adapter, fidx);
4429
4430 return 0;
4431}
4432
/* Install a hardware filter that steers packets destined to @sip:@sport
 * to ingress queue @queue.  @stid is a server TID as handed out by the
 * stid allocator; it is translated into an index into the filter table.
 * The created filter is left locked so ordinary filter operations cannot
 * remove it; use cxgb4_remove_server_filter() to delete it.
 * Exported for use by upper-layer (offload) drivers.
 *
 * NOTE(review): unlike delete_filter(), no bounds check is applied to
 * the translated stid before indexing ftid_tab — callers are trusted to
 * pass a valid server TID; confirm against the stid allocator.
 */
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
		__be32 sip, __be16 sport, __be16 vlan,
		unsigned int queue, unsigned char port, unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport  = ~0;
	val = (u8 *)&sip;
	/* A zero IP means "match any local IP": skip the address match */
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		/* Match the ingress port only if the TP filter tuple
		 * configuration includes the port field.
		 */
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	/* Likewise, match protocol TCP only if TP is configured for it */
	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
4498
4499int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4500 unsigned int queue, bool ipv6)
4501{
4502 int ret;
4503 struct filter_entry *f;
4504 struct adapter *adap;
4505
4506 adap = netdev2adap(dev);
Vipul Pandya1cab7752012-12-10 09:30:55 +00004507
4508 /* Adjust stid to correct filter index */
Kumar Sanghvi470c60c2013-12-18 16:38:21 +05304509 stid -= adap->tids.sftid_base;
Vipul Pandya1cab7752012-12-10 09:30:55 +00004510 stid += adap->tids.nftids;
4511
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004512 f = &adap->tids.ftid_tab[stid];
4513 /* Unlock the filter */
4514 f->locked = 0;
4515
4516 ret = delete_filter(adap, stid);
4517 if (ret)
4518 return ret;
4519
4520 return 0;
4521}
4522EXPORT_SYMBOL(cxgb4_remove_server_filter);
4523
/* ndo_get_stats64 handler: read the MAC statistics for this port from
 * hardware and translate them into struct rtnl_link_stats64 fields.
 * Always returns @ns (possibly unmodified during EEH recovery).
 */
static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return ns;
	}
	t4_get_port_stats(adapter, p->tx_chan, &stats);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = stats.rx_fcs_err;
	ns->rx_frame_errors = stats.rx_symbol_err;
	/* FIFO errors = per-channel overflows plus truncations */
	ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
			     stats.rx_ovflow2 + stats.rx_ovflow3 +
			     stats.rx_trunc0 + stats.rx_trunc1 +
			     stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}
4573
/* ndo_do_ioctl handler: implements the MII ioctls (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG) by forwarding register reads/writes to the
 * PHY through the firmware mailbox.  Both clause-45 and clause-22 style
 * PHY addressing are supported.  Returns 0 or a negative errno.
 */
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			/* Clause-45: phy_id encodes port and device address */
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			/* Clause-22: plain 5-bit PHY address */
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->fn;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
4612
/* ndo_set_rx_mode handler: resync promiscuity/multicast state with
 * hardware.  -1 for the MTU argument means "leave unchanged".
 */
static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}
4618
4619static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4620{
4621 int ret;
4622 struct port_info *pi = netdev_priv(dev);
4623
4624 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4625 return -EINVAL;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004626 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4627 -1, -1, -1, true);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004628 if (!ret)
4629 dev->mtu = new_mtu;
4630 return ret;
4631}
4632
/* ndo_set_mac_address handler: validate the new address, program it into
 * the hardware exact-match (MPS TCAM) table via firmware, then update the
 * net_device and remember the returned filter index for future changes.
 */
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* On success t4_change_mac() returns the index of the exact-match
	 * filter it (re)used, which we must pass back on the next change.
	 */
	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}
4651
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller handler: service this port's RX queues with
 * interrupts disabled (netconsole/netpoll path).  With MSI-X each queue
 * handler is invoked directly; otherwise the shared interrupt handler
 * for the whole adapter is called.
 */
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif
4668
/* net_device operations vector for all cxgb4 ports */
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
};
4685
/* Handle an unrecoverable hardware error: stop the SGE globally, mask
 * all interrupts and log an alert.  The adapter is left inoperative.
 */
void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
4692
/* Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command since we prefer to let
 * the firmware own all of these registers, but if that fails we go for it
 * directly ourselves.
 */
static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
{
	struct fw_ldst_cmd ldst_cmd;
	u32 val;
	int ret;

	/* Construct and send the Firmware LDST Command to retrieve the
	 * specified PCI-E Configuration Space register.
	 */
	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
	ldst_cmd.op_to_addrspace =
		htonl(FW_CMD_OP_V(FW_LDST_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F |
		      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
	ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
	ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
	ldst_cmd.u.pcie.ctrl_to_fn =
		(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
	ldst_cmd.u.pcie.r = reg;
	ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
			 &ldst_cmd);

	/* If the LDST Command succeeded, extract the returned register
	 * value.  Otherwise read it directly ourself.
	 */
	if (ret == 0)
		val = ntohl(ldst_cmd.u.pcie.data[0]);
	else
		t4_hw_pci_read_cfg4(adap, reg, &val);

	return val;
}
4731
/* Program the three PCI-E memory access windows used for backdoor access
 * to adapter memory.  On T4 the window bases are absolute bus addresses
 * (BAR0-relative offsets added to the BAR0 address read via firmware);
 * on T5+ the hardware takes offsets relative to the BAR itself.
 */
static void setup_memwin(struct adapter *adap)
{
	u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;

	if (is_t4(adap->params.chip)) {
		u32 bar0;

		/* Truncation intentional: we only read the bottom 32-bits of
		 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
		 * mechanism to read BAR0 instead of using
		 * pci_resource_start() because we could be operating from
		 * within a Virtual Machine which is trapping our accesses to
		 * our Configuration Space and we need to set up the PCI-E
		 * Memory Window decoders with the actual addresses which will
		 * be coming across the PCI-E link.
		 */
		bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
		bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
		adap->t4_bar0 = bar0;

		mem_win0_base = bar0 + MEMWIN0_BASE;
		mem_win1_base = bar0 + MEMWIN1_BASE;
		mem_win2_base = bar0 + MEMWIN2_BASE;
		mem_win2_aperture = MEMWIN2_APERTURE;
	} else {
		/* For T5, only relative offset inside the PCIe BAR is passed */
		mem_win0_base = MEMWIN0_BASE;
		mem_win1_base = MEMWIN1_BASE;
		mem_win2_base = MEMWIN2_BASE_T5;
		mem_win2_aperture = MEMWIN2_APERTURE_T5;
	}
	/* Window size is encoded as log2(aperture) - 10 (i.e. in KB units) */
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
		     mem_win0_base | BIR_V(0) |
		     WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
		     mem_win1_base | BIR_V(0) |
		     WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
		     mem_win2_base | BIR_V(0) |
		     WINDOW_V(ilog2(mem_win2_aperture) - 10));
	/* Read back to flush the window setup before it is used */
	t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
}
4774
/* Set up memory window 3 for the RDMA on-chip queues (OCQ), if any are
 * configured.  The window is sized to the next power of two of the OCQ
 * region (in KB) and pointed at the OCQ start address.
 */
static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		/* Base is BAR2 plus the adapter-specific OCQ offset */
		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		/* Read back to flush the window setup */
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}
4795
/* Phase 1 adapter initialization (hard-coded / non-Config-File path):
 * negotiate device capabilities with the firmware, configure global RSS
 * and PF/VF resources, initialize the SGE, and apply TP register tweaks.
 * @c is filled in with the capabilities read from the firmware.
 * Returns 0 or a negative errno from the first failing firmware command.
 */
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	/* select capabilities we'll be using */
	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
		return ret;
	}
	/* Write back the (possibly reduced) capability selection */
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->fn,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	/* Clear CSUM_HAS_PSEUDO_HDR in TP_INGRESS_CONFIG via indirect PIO */
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->fn);
}
4878
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004879/*
4880 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4881 */
4882#define MAX_ATIDS 8192U
4883
4884/*
4885 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
Vipul Pandya636f9d32012-09-26 02:39:39 +00004886 *
4887 * If the firmware we're dealing with has Configuration File support, then
4888 * we use that to perform all configuration
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004889 */
Vipul Pandya636f9d32012-09-26 02:39:39 +00004890
/*
 * Tweak configuration based on module parameters, etc. Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization. But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc. The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	/* Always succeeds; kept int for symmetry with the other init steps */
	return 0;
}
4931
4932/*
4933 * Attempt to initialize the adapter via a Firmware Configuration File.
4934 */
4935static int adap_init0_config(struct adapter *adapter, int reset)
4936{
4937 struct fw_caps_config_cmd caps_cmd;
4938 const struct firmware *cf;
4939 unsigned long mtype = 0, maddr = 0;
4940 u32 finiver, finicsum, cfcsum;
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304941 int ret;
4942 int config_issued = 0;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00004943 char *fw_config_file, fw_config_file_path[256];
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304944 char *config_name = NULL;
Vipul Pandya636f9d32012-09-26 02:39:39 +00004945
4946 /*
4947 * Reset device if necessary.
4948 */
4949 if (reset) {
4950 ret = t4_fw_reset(adapter, adapter->mbox,
Hariprasad Shenai0d804332015-01-05 16:30:47 +05304951 PIORSTMODE_F | PIORST_F);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004952 if (ret < 0)
4953 goto bye;
4954 }
4955
4956 /*
4957 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4958 * then use that. Otherwise, use the configuration file stored
4959 * in the adapter flash ...
4960 */
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05304961 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00004962 case CHELSIO_T4:
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304963 fw_config_file = FW4_CFNAME;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00004964 break;
4965 case CHELSIO_T5:
4966 fw_config_file = FW5_CFNAME;
4967 break;
4968 default:
4969 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4970 adapter->pdev->device);
4971 ret = -EINVAL;
4972 goto bye;
4973 }
4974
4975 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004976 if (ret < 0) {
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304977 config_name = "On FLASH";
Vipul Pandya636f9d32012-09-26 02:39:39 +00004978 mtype = FW_MEMTYPE_CF_FLASH;
4979 maddr = t4_flash_cfg_addr(adapter);
4980 } else {
4981 u32 params[7], val[7];
4982
Hariprasad Shenai16e47622013-12-03 17:05:58 +05304983 sprintf(fw_config_file_path,
4984 "/lib/firmware/%s", fw_config_file);
4985 config_name = fw_config_file_path;
4986
Vipul Pandya636f9d32012-09-26 02:39:39 +00004987 if (cf->size >= FLASH_CFG_MAX_SIZE)
4988 ret = -ENOMEM;
4989 else {
Hariprasad Shenai51678652014-11-21 12:52:02 +05304990 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4991 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
Vipul Pandya636f9d32012-09-26 02:39:39 +00004992 ret = t4_query_params(adapter, adapter->mbox,
4993 adapter->fn, 0, 1, params, val);
4994 if (ret == 0) {
4995 /*
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05304996 * For t4_memory_rw() below addresses and
Vipul Pandya636f9d32012-09-26 02:39:39 +00004997 * sizes have to be in terms of multiples of 4
4998 * bytes. So, if the Configuration File isn't
4999 * a multiple of 4 bytes in length we'll have
5000 * to write that out separately since we can't
5001 * guarantee that the bytes following the
5002 * residual byte in the buffer returned by
5003 * request_firmware() are zeroed out ...
5004 */
5005 size_t resid = cf->size & 0x3;
5006 size_t size = cf->size & ~0x3;
5007 __be32 *data = (__be32 *)cf->data;
5008
Hariprasad Shenai51678652014-11-21 12:52:02 +05305009 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
5010 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005011
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05305012 spin_lock(&adapter->win0_lock);
5013 ret = t4_memory_rw(adapter, 0, mtype, maddr,
5014 size, data, T4_MEMORY_WRITE);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005015 if (ret == 0 && resid != 0) {
5016 union {
5017 __be32 word;
5018 char buf[4];
5019 } last;
5020 int i;
5021
5022 last.word = data[size >> 2];
5023 for (i = resid; i < 4; i++)
5024 last.buf[i] = 0;
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05305025 ret = t4_memory_rw(adapter, 0, mtype,
5026 maddr + size,
5027 4, &last.word,
5028 T4_MEMORY_WRITE);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005029 }
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05305030 spin_unlock(&adapter->win0_lock);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005031 }
5032 }
5033
5034 release_firmware(cf);
5035 if (ret)
5036 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005037 }
5038
Vipul Pandya636f9d32012-09-26 02:39:39 +00005039 /*
5040 * Issue a Capability Configuration command to the firmware to get it
5041 * to parse the Configuration File. We don't use t4_fw_config_file()
5042 * because we want the ability to modify various features after we've
5043 * processed the configuration file ...
5044 */
5045 memset(&caps_cmd, 0, sizeof(caps_cmd));
5046 caps_cmd.op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05305047 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5048 FW_CMD_REQUEST_F |
5049 FW_CMD_READ_F);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05305050 caps_cmd.cfvalid_to_len16 =
Hariprasad Shenai51678652014-11-21 12:52:02 +05305051 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
5052 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
5053 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
Vipul Pandya636f9d32012-09-26 02:39:39 +00005054 FW_LEN16(caps_cmd));
5055 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5056 &caps_cmd);
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305057
5058 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
5059 * Configuration File in FLASH), our last gasp effort is to use the
5060 * Firmware Configuration File which is embedded in the firmware. A
5061 * very few early versions of the firmware didn't have one embedded
5062 * but we can ignore those.
5063 */
5064 if (ret == -ENOENT) {
5065 memset(&caps_cmd, 0, sizeof(caps_cmd));
5066 caps_cmd.op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05305067 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5068 FW_CMD_REQUEST_F |
5069 FW_CMD_READ_F);
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305070 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5071 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
5072 sizeof(caps_cmd), &caps_cmd);
5073 config_name = "Firmware Default";
5074 }
5075
5076 config_issued = 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005077 if (ret < 0)
5078 goto bye;
5079
Vipul Pandya636f9d32012-09-26 02:39:39 +00005080 finiver = ntohl(caps_cmd.finiver);
5081 finicsum = ntohl(caps_cmd.finicsum);
5082 cfcsum = ntohl(caps_cmd.cfcsum);
5083 if (finicsum != cfcsum)
5084 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
5085 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
5086 finicsum, cfcsum);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005087
Vipul Pandya636f9d32012-09-26 02:39:39 +00005088 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00005089 * And now tell the firmware to use the configuration we just loaded.
5090 */
5091 caps_cmd.op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05305092 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5093 FW_CMD_REQUEST_F |
5094 FW_CMD_WRITE_F);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05305095 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
Vipul Pandya636f9d32012-09-26 02:39:39 +00005096 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5097 NULL);
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00005098 if (ret < 0)
5099 goto bye;
5100
Vipul Pandya636f9d32012-09-26 02:39:39 +00005101 /*
5102 * Tweak configuration based on system architecture, module
5103 * parameters, etc.
5104 */
5105 ret = adap_init0_tweaks(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005106 if (ret < 0)
5107 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005108
Vipul Pandya636f9d32012-09-26 02:39:39 +00005109 /*
5110 * And finally tell the firmware to initialize itself using the
5111 * parameters from the Configuration File.
5112 */
5113 ret = t4_fw_initialize(adapter, adapter->mbox);
5114 if (ret < 0)
5115 goto bye;
5116
Hariprasad Shenai06640312015-01-13 15:19:25 +05305117 /* Emit Firmware Configuration File information and return
5118 * successfully.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005119 */
Vipul Pandya636f9d32012-09-26 02:39:39 +00005120 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305121 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5122 config_name, finiver, cfcsum);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005123 return 0;
5124
5125 /*
5126 * Something bad happened. Return the error ... (If the "error"
5127 * is that there's no Configuration File on the adapter we don't
5128 * want to issue a warning since this is fairly common.)
5129 */
5130bye:
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305131 if (config_issued && ret != -ENOENT)
5132 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5133 config_name, -ret);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005134 return ret;
5135}
5136
/* Table of the firmware images this driver knows how to manage, one entry
 * per supported ASIC generation.  fs_name names the Firmware Configuration
 * File and fw_mod_name the firmware image under /lib/firmware/; fw_hdr is
 * the header of the firmware the driver was compiled against, which
 * t4_prep_fw() compares with the image on the card during the upgrade
 * decision in adap_init0().
 */
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
5166
5167static struct fw_info *find_fw_info(int chip)
5168{
5169 int i;
5170
5171 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5172 if (fw_info_array[i].chip == chip)
5173 return &fw_info_array[i];
5174 }
5175 return NULL;
5176}
5177
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005178/*
Vipul Pandya636f9d32012-09-26 02:39:39 +00005179 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005180 */
5181static int adap_init0(struct adapter *adap)
5182{
5183 int ret;
5184 u32 v, port_vec;
5185 enum dev_state state;
5186 u32 params[7], val[7];
Vipul Pandya9a4da2c2012-10-19 02:09:53 +00005187 struct fw_caps_config_cmd caps_cmd;
Hariprasad Shenai49aa2842015-01-07 08:48:00 +05305188 struct fw_devlog_cmd devlog_cmd;
5189 u32 devlog_meminfo;
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05305190 int reset = 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005191
Hariprasad Shenai666224d2014-12-11 11:11:43 +05305192 /* Contact FW, advertising Master capability */
5193 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005194 if (ret < 0) {
5195 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5196 ret);
5197 return ret;
5198 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005199 if (ret == adap->mbox)
5200 adap->flags |= MASTER_PF;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005201
Vipul Pandya636f9d32012-09-26 02:39:39 +00005202 /*
5203 * If we're the Master PF Driver and the device is uninitialized,
5204 * then let's consider upgrading the firmware ... (We always want
5205 * to check the firmware version number in order to A. get it for
5206 * later reporting and B. to warn if the currently loaded firmware
5207 * is excessively mismatched relative to the driver.)
5208 */
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305209 t4_get_fw_version(adap, &adap->params.fw_vers);
5210 t4_get_tp_version(adap, &adap->params.tp_vers);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005211 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305212 struct fw_info *fw_info;
5213 struct fw_hdr *card_fw;
5214 const struct firmware *fw;
5215 const u8 *fw_data = NULL;
5216 unsigned int fw_size = 0;
5217
5218 /* This is the firmware whose headers the driver was compiled
5219 * against
5220 */
5221 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5222 if (fw_info == NULL) {
5223 dev_err(adap->pdev_dev,
5224 "unable to get firmware info for chip %d.\n",
5225 CHELSIO_CHIP_VERSION(adap->params.chip));
5226 return -EINVAL;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005227 }
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305228
5229 /* allocate memory to read the header of the firmware on the
5230 * card
5231 */
5232 card_fw = t4_alloc_mem(sizeof(*card_fw));
5233
5234 /* Get FW from from /lib/firmware/ */
5235 ret = request_firmware(&fw, fw_info->fw_mod_name,
5236 adap->pdev_dev);
5237 if (ret < 0) {
5238 dev_err(adap->pdev_dev,
5239 "unable to load firmware image %s, error %d\n",
5240 fw_info->fw_mod_name, ret);
5241 } else {
5242 fw_data = fw->data;
5243 fw_size = fw->size;
5244 }
5245
5246 /* upgrade FW logic */
5247 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5248 state, &reset);
5249
5250 /* Cleaning up */
5251 if (fw != NULL)
5252 release_firmware(fw);
5253 t4_free_mem(card_fw);
5254
Vipul Pandya636f9d32012-09-26 02:39:39 +00005255 if (ret < 0)
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305256 goto bye;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005257 }
5258
5259 /*
5260 * Grab VPD parameters. This should be done after we establish a
5261 * connection to the firmware since some of the VPD parameters
5262 * (notably the Core Clock frequency) are retrieved via requests to
5263 * the firmware. On the other hand, we need these fairly early on
5264 * so we do this right after getting ahold of the firmware.
5265 */
5266 ret = get_vpd_params(adap, &adap->params.vpd);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005267 if (ret < 0)
5268 goto bye;
5269
Hariprasad Shenai49aa2842015-01-07 08:48:00 +05305270 /* Read firmware device log parameters. We really need to find a way
5271 * to get these parameters initialized with some default values (which
5272 * are likely to be correct) for the case where we either don't
5273 * attache to the firmware or it's crashed when we probe the adapter.
5274 * That way we'll still be able to perform early firmware startup
5275 * debugging ... If the request to get the Firmware's Device Log
5276 * parameters fails, we'll live so we don't make that a fatal error.
5277 */
5278 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
5279 devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
5280 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5281 devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
5282 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
5283 &devlog_cmd);
5284 if (ret == 0) {
5285 devlog_meminfo =
5286 ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
5287 adap->params.devlog.memtype =
5288 FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
5289 adap->params.devlog.start =
5290 FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
5291 adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
5292 }
5293
Vipul Pandya636f9d32012-09-26 02:39:39 +00005294 /*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005295 * Find out what ports are available to us. Note that we need to do
5296 * this before calling adap_init0_no_config() since it needs nports
5297 * and portvec ...
Vipul Pandya636f9d32012-09-26 02:39:39 +00005298 */
5299 v =
Hariprasad Shenai51678652014-11-21 12:52:02 +05305300 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5301 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005302 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5303 if (ret < 0)
5304 goto bye;
5305
5306 adap->params.nports = hweight32(port_vec);
5307 adap->params.portvec = port_vec;
5308
Hariprasad Shenai06640312015-01-13 15:19:25 +05305309 /* If the firmware is initialized already, emit a simply note to that
5310 * effect. Otherwise, it's time to try initializing the adapter.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005311 */
5312 if (state == DEV_STATE_INIT) {
5313 dev_info(adap->pdev_dev, "Coming up as %s: "\
5314 "Adapter already initialized\n",
5315 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
Vipul Pandya636f9d32012-09-26 02:39:39 +00005316 } else {
5317 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5318 "Initializing adapter\n");
Hariprasad Shenai06640312015-01-13 15:19:25 +05305319
5320 /* Find out whether we're dealing with a version of the
5321 * firmware which has configuration file support.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005322 */
Hariprasad Shenai06640312015-01-13 15:19:25 +05305323 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5324 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
5325 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5326 params, val);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005327
Hariprasad Shenai06640312015-01-13 15:19:25 +05305328 /* If the firmware doesn't support Configuration Files,
5329 * return an error.
5330 */
5331 if (ret < 0) {
5332 dev_err(adap->pdev_dev, "firmware doesn't support "
5333 "Firmware Configuration Files\n");
5334 goto bye;
5335 }
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005336
Hariprasad Shenai06640312015-01-13 15:19:25 +05305337 /* The firmware provides us with a memory buffer where we can
5338 * load a Configuration File from the host if we want to
5339 * override the Configuration File in flash.
5340 */
5341 ret = adap_init0_config(adap, reset);
5342 if (ret == -ENOENT) {
5343 dev_err(adap->pdev_dev, "no Configuration File "
5344 "present on adapter.\n");
5345 goto bye;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005346 }
5347 if (ret < 0) {
Hariprasad Shenai06640312015-01-13 15:19:25 +05305348 dev_err(adap->pdev_dev, "could not initialize "
5349 "adapter, error %d\n", -ret);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005350 goto bye;
5351 }
5352 }
5353
Hariprasad Shenai06640312015-01-13 15:19:25 +05305354 /* Give the SGE code a chance to pull in anything that it needs ...
5355 * Note that this must be called after we retrieve our VPD parameters
5356 * in order to know how to convert core ticks to seconds, etc.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005357 */
Hariprasad Shenai06640312015-01-13 15:19:25 +05305358 ret = t4_sge_init(adap);
5359 if (ret < 0)
5360 goto bye;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005361
Vipul Pandya9a4da2c2012-10-19 02:09:53 +00005362 if (is_bypass_device(adap->pdev->device))
5363 adap->params.bypass = 1;
5364
Vipul Pandya636f9d32012-09-26 02:39:39 +00005365 /*
5366 * Grab some of our basic fundamental operating parameters.
5367 */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005368#define FW_PARAM_DEV(param) \
Hariprasad Shenai51678652014-11-21 12:52:02 +05305369 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
5370 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005371
5372#define FW_PARAM_PFVF(param) \
Hariprasad Shenai51678652014-11-21 12:52:02 +05305373 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
5374 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
5375 FW_PARAMS_PARAM_Y_V(0) | \
5376 FW_PARAMS_PARAM_Z_V(0)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005377
Vipul Pandya636f9d32012-09-26 02:39:39 +00005378 params[0] = FW_PARAM_PFVF(EQ_START);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005379 params[1] = FW_PARAM_PFVF(L2T_START);
5380 params[2] = FW_PARAM_PFVF(L2T_END);
5381 params[3] = FW_PARAM_PFVF(FILTER_START);
5382 params[4] = FW_PARAM_PFVF(FILTER_END);
5383 params[5] = FW_PARAM_PFVF(IQFLINT_START);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005384 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005385 if (ret < 0)
5386 goto bye;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005387 adap->sge.egr_start = val[0];
5388 adap->l2t_start = val[1];
5389 adap->l2t_end = val[2];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005390 adap->tids.ftid_base = val[3];
5391 adap->tids.nftids = val[4] - val[3] + 1;
5392 adap->sge.ingr_start = val[5];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005393
Vipul Pandya636f9d32012-09-26 02:39:39 +00005394 /* query params related to active filter region */
5395 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5396 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5397 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5398 /* If Active filter size is set we enable establishing
5399 * offload connection through firmware work request
5400 */
5401 if ((val[0] != val[1]) && (ret >= 0)) {
5402 adap->flags |= FW_OFLD_CONN;
5403 adap->tids.aftid_base = val[0];
5404 adap->tids.aftid_end = val[1];
5405 }
5406
Vipul Pandyab407a4a2013-04-29 04:04:40 +00005407 /* If we're running on newer firmware, let it know that we're
5408 * prepared to deal with encapsulated CPL messages. Older
5409 * firmware won't understand this and we'll just get
5410 * unencapsulated messages ...
5411 */
5412 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5413 val[0] = 1;
5414 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5415
Vipul Pandya636f9d32012-09-26 02:39:39 +00005416 /*
Kumar Sanghvi1ac0f092014-02-18 17:56:12 +05305417 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5418 * capability. Earlier versions of the firmware didn't have the
5419 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5420 * permission to use ULPTX MEMWRITE DSGL.
5421 */
5422 if (is_t4(adap->params.chip)) {
5423 adap->params.ulptx_memwrite_dsgl = false;
5424 } else {
5425 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5426 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5427 1, params, val);
5428 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5429 }
5430
5431 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00005432 * Get device capabilities so we can determine what resources we need
5433 * to manage.
5434 */
5435 memset(&caps_cmd, 0, sizeof(caps_cmd));
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05305436 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5437 FW_CMD_REQUEST_F | FW_CMD_READ_F);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05305438 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
Vipul Pandya636f9d32012-09-26 02:39:39 +00005439 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5440 &caps_cmd);
5441 if (ret < 0)
5442 goto bye;
5443
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005444 if (caps_cmd.ofldcaps) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005445 /* query offload-related parameters */
5446 params[0] = FW_PARAM_DEV(NTID);
5447 params[1] = FW_PARAM_PFVF(SERVER_START);
5448 params[2] = FW_PARAM_PFVF(SERVER_END);
5449 params[3] = FW_PARAM_PFVF(TDDP_START);
5450 params[4] = FW_PARAM_PFVF(TDDP_END);
5451 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005452 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5453 params, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005454 if (ret < 0)
5455 goto bye;
5456 adap->tids.ntids = val[0];
5457 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5458 adap->tids.stid_base = val[1];
5459 adap->tids.nstids = val[2] - val[1] + 1;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005460 /*
5461 * Setup server filter region. Divide the availble filter
5462 * region into two parts. Regular filters get 1/3rd and server
5463 * filters get 2/3rd part. This is only enabled if workarond
5464 * path is enabled.
5465 * 1. For regular filters.
5466 * 2. Server filter: This are special filters which are used
5467 * to redirect SYN packets to offload queue.
5468 */
5469 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5470 adap->tids.sftid_base = adap->tids.ftid_base +
5471 DIV_ROUND_UP(adap->tids.nftids, 3);
5472 adap->tids.nsftids = adap->tids.nftids -
5473 DIV_ROUND_UP(adap->tids.nftids, 3);
5474 adap->tids.nftids = adap->tids.sftid_base -
5475 adap->tids.ftid_base;
5476 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005477 adap->vres.ddp.start = val[3];
5478 adap->vres.ddp.size = val[4] - val[3] + 1;
5479 adap->params.ofldq_wr_cred = val[5];
Vipul Pandya636f9d32012-09-26 02:39:39 +00005480
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005481 adap->params.offload = 1;
5482 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005483 if (caps_cmd.rdmacaps) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005484 params[0] = FW_PARAM_PFVF(STAG_START);
5485 params[1] = FW_PARAM_PFVF(STAG_END);
5486 params[2] = FW_PARAM_PFVF(RQ_START);
5487 params[3] = FW_PARAM_PFVF(RQ_END);
5488 params[4] = FW_PARAM_PFVF(PBL_START);
5489 params[5] = FW_PARAM_PFVF(PBL_END);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005490 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5491 params, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005492 if (ret < 0)
5493 goto bye;
5494 adap->vres.stag.start = val[0];
5495 adap->vres.stag.size = val[1] - val[0] + 1;
5496 adap->vres.rq.start = val[2];
5497 adap->vres.rq.size = val[3] - val[2] + 1;
5498 adap->vres.pbl.start = val[4];
5499 adap->vres.pbl.size = val[5] - val[4] + 1;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00005500
5501 params[0] = FW_PARAM_PFVF(SQRQ_START);
5502 params[1] = FW_PARAM_PFVF(SQRQ_END);
5503 params[2] = FW_PARAM_PFVF(CQ_START);
5504 params[3] = FW_PARAM_PFVF(CQ_END);
Dimitris Michailidis1ae970e2010-08-02 13:19:19 +00005505 params[4] = FW_PARAM_PFVF(OCQ_START);
5506 params[5] = FW_PARAM_PFVF(OCQ_END);
Hariprasad Shenai5c937dd2014-09-01 19:55:00 +05305507 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
5508 val);
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00005509 if (ret < 0)
5510 goto bye;
5511 adap->vres.qp.start = val[0];
5512 adap->vres.qp.size = val[1] - val[0] + 1;
5513 adap->vres.cq.start = val[2];
5514 adap->vres.cq.size = val[3] - val[2] + 1;
Dimitris Michailidis1ae970e2010-08-02 13:19:19 +00005515 adap->vres.ocq.start = val[4];
5516 adap->vres.ocq.size = val[5] - val[4] + 1;
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05305517
5518 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5519 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
Hariprasad Shenai5c937dd2014-09-01 19:55:00 +05305520 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
5521 val);
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05305522 if (ret < 0) {
5523 adap->params.max_ordird_qp = 8;
5524 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5525 ret = 0;
5526 } else {
5527 adap->params.max_ordird_qp = val[0];
5528 adap->params.max_ird_adapter = val[1];
5529 }
5530 dev_info(adap->pdev_dev,
5531 "max_ordird_qp %d max_ird_adapter %d\n",
5532 adap->params.max_ordird_qp,
5533 adap->params.max_ird_adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005534 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005535 if (caps_cmd.iscsicaps) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005536 params[0] = FW_PARAM_PFVF(ISCSI_START);
5537 params[1] = FW_PARAM_PFVF(ISCSI_END);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005538 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5539 params, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005540 if (ret < 0)
5541 goto bye;
5542 adap->vres.iscsi.start = val[0];
5543 adap->vres.iscsi.size = val[1] - val[0] + 1;
5544 }
5545#undef FW_PARAM_PFVF
5546#undef FW_PARAM_DEV
5547
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05305548 /* The MTU/MSS Table is initialized by now, so load their values. If
5549 * we're initializing the adapter, then we'll make any modifications
5550 * we want to the MTU/MSS Table and also initialize the congestion
5551 * parameters.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005552 */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005553 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05305554 if (state != DEV_STATE_INIT) {
5555 int i;
Casey Leedom7ee9ff92010-06-25 12:11:46 +00005556
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05305557 /* The default MTU Table contains values 1492 and 1500.
5558 * However, for TCP, it's better to have two values which are
5559 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5560 * This allows us to have a TCP Data Payload which is a
5561 * multiple of 8 regardless of what combination of TCP Options
5562 * are in use (always a multiple of 4 bytes) which is
5563 * important for performance reasons. For instance, if no
5564 * options are in use, then we have a 20-byte IP header and a
5565 * 20-byte TCP header. In this case, a 1500-byte MSS would
5566 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5567 * which is not a multiple of 8. So using an MSS of 1488 in
5568 * this case results in a TCP Data Payload of 1448 bytes which
5569 * is a multiple of 8. On the other hand, if 12-byte TCP Time
5570 * Stamps have been negotiated, then an MTU of 1500 bytes
5571 * results in a TCP Data Payload of 1448 bytes which, as
5572 * above, is a multiple of 8 bytes ...
5573 */
5574 for (i = 0; i < NMTUS; i++)
5575 if (adap->params.mtus[i] == 1492) {
5576 adap->params.mtus[i] = 1488;
5577 break;
5578 }
5579
5580 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5581 adap->params.b_wnd);
5582 }
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05305583 t4_init_sge_params(adap);
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05305584 t4_init_tp_params(adap);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005585 adap->flags |= FW_OK;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005586 return 0;
5587
5588 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00005589 * Something bad happened. If a command timed out or failed with EIO
5590 * FW does not operate within its spec or something catastrophic
5591 * happened to HW/FW, stop issuing commands.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005592 */
Vipul Pandya636f9d32012-09-26 02:39:39 +00005593bye:
5594 if (ret != -ETIMEDOUT && ret != -EIO)
5595 t4_fw_bye(adap, adap->mbox);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005596 return ret;
5597}
5598
/* EEH callbacks */

/*
 * PCI error-recovery entry point: a PCI channel error was detected on our
 * slot.  Quiesce the adapter (detach net devices, notify the upper-layer
 * drivers, shut down queues) and disable the PCI device so the EEH core
 * can reset the slot.  Returns DISCONNECT for permanent failures,
 * NEED_RESET otherwise.
 */
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	/* Nothing to tear down if probe never attached adapter state */
	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	/* Tell ULDs (iWARP/iSCSI/...) that recovery has started */
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	/* NOTE(review): stats_lock is held around the detach loop —
	 * presumably to serialize against concurrent stats readers; confirm.
	 */
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	/* Disable the device only once; eeh_slot_reset() re-enables it */
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
5631
/*
 * PCI error-recovery: the slot has been reset.  Re-enable the PCI device,
 * restore PCI state, re-establish contact with the firmware, re-run basic
 * adapter init and re-allocate the virtual interfaces for each port.
 * Returns RECOVERED on success, DISCONNECT if any step fails.
 */
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	/* No adapter state: just restore PCI config space and report OK */
	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	/* Re-enable the device if eeh_err_detected() disabled it */
	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					"device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	/* Wait for the chip to come out of reset, then renegotiate with
	 * the firmware, insisting on being Master this time (MASTER_MUST).
	 */
	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	/* Re-create a virtual interface for every port; the old VI ids and
	 * exact-match filter slots are gone after the reset.
	 */
	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	/* Reload MTU table and memory windows before bringing queues up */
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
5684
5685static void eeh_resume(struct pci_dev *pdev)
5686{
5687 int i;
5688 struct adapter *adap = pci_get_drvdata(pdev);
5689
5690 if (!adap)
5691 return;
5692
5693 rtnl_lock();
5694 for_each_port(adap, i) {
5695 struct net_device *dev = adap->port[i];
5696
5697 if (netif_running(dev)) {
5698 link_start(dev);
5699 cxgb_set_rxmode(dev);
5700 }
5701 netif_device_attach(dev);
5702 }
5703 rtnl_unlock();
5704}
5705
/* PCI error-recovery (EEH/AER) handler table registered with the PCI core */
static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset = eeh_slot_reset,
	.resume = eeh_resume,
};
5711
Kumar Sanghvi57d8b762014-02-18 17:56:10 +05305712static inline bool is_x_10g_port(const struct link_config *lc)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005713{
Kumar Sanghvi57d8b762014-02-18 17:56:10 +05305714 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
5715 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005716}
5717
/*
 * Fill in the basic fields of a response queue before it is created:
 * back-pointer to the adapter, interrupt holdoff parameters (us/cnt),
 * entry size and queue depth.  q->adap is set before
 * set_rspq_intr_params() is called — NOTE(review): that helper
 * presumably derives timer values from the adapter, so keep this order.
 */
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
			     unsigned int us, unsigned int cnt,
			     unsigned int size, unsigned int iqe_size)
{
	q->adap = adap;
	set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}
5727
5728/*
5729 * Perform default configuration of DMA queues depending on the number and type
5730 * of ports we found and the number of available CPUs. Most settings can be
5731 * modified by the admin prior to actual use.
5732 */
Bill Pemberton91744942012-12-03 09:23:02 -05005733static void cfg_queues(struct adapter *adap)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005734{
5735 struct sge *s = &adap->sge;
Anish Bhatt688848b2014-06-19 21:37:13 -07005736 int i, n10g = 0, qidx = 0;
5737#ifndef CONFIG_CHELSIO_T4_DCB
5738 int q10g = 0;
5739#endif
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05305740 int ciq_size;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005741
5742 for_each_port(adap, i)
Kumar Sanghvi57d8b762014-02-18 17:56:10 +05305743 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
Anish Bhatt688848b2014-06-19 21:37:13 -07005744#ifdef CONFIG_CHELSIO_T4_DCB
5745 /* For Data Center Bridging support we need to be able to support up
5746 * to 8 Traffic Priorities; each of which will be assigned to its
5747 * own TX Queue in order to prevent Head-Of-Line Blocking.
5748 */
5749 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
5750 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
5751 MAX_ETH_QSETS, adap->params.nports * 8);
5752 BUG_ON(1);
5753 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005754
Anish Bhatt688848b2014-06-19 21:37:13 -07005755 for_each_port(adap, i) {
5756 struct port_info *pi = adap2pinfo(adap, i);
5757
5758 pi->first_qset = qidx;
5759 pi->nqsets = 8;
5760 qidx += pi->nqsets;
5761 }
5762#else /* !CONFIG_CHELSIO_T4_DCB */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005763 /*
5764 * We default to 1 queue per non-10G port and up to # of cores queues
5765 * per 10G port.
5766 */
5767 if (n10g)
5768 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
Yuval Mintz5952dde2012-07-01 03:18:55 +00005769 if (q10g > netif_get_num_default_rss_queues())
5770 q10g = netif_get_num_default_rss_queues();
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005771
5772 for_each_port(adap, i) {
5773 struct port_info *pi = adap2pinfo(adap, i);
5774
5775 pi->first_qset = qidx;
Kumar Sanghvi57d8b762014-02-18 17:56:10 +05305776 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005777 qidx += pi->nqsets;
5778 }
Anish Bhatt688848b2014-06-19 21:37:13 -07005779#endif /* !CONFIG_CHELSIO_T4_DCB */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005780
5781 s->ethqsets = qidx;
5782 s->max_ethqsets = qidx; /* MSI-X may lower it later */
5783
5784 if (is_offload(adap)) {
5785 /*
5786 * For offload we use 1 queue/channel if all ports are up to 1G,
5787 * otherwise we divide all available queues amongst the channels
5788 * capped by the number of available cores.
5789 */
5790 if (n10g) {
5791 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
5792 num_online_cpus());
5793 s->ofldqsets = roundup(i, adap->params.nports);
5794 } else
5795 s->ofldqsets = adap->params.nports;
5796 /* For RDMA one Rx queue per channel suffices */
5797 s->rdmaqs = adap->params.nports;
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05305798 s->rdmaciqs = adap->params.nports;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005799 }
5800
5801 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5802 struct sge_eth_rxq *r = &s->ethrxq[i];
5803
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05305804 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005805 r->fl.size = 72;
5806 }
5807
5808 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5809 s->ethtxq[i].q.size = 1024;
5810
5811 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5812 s->ctrlq[i].q.size = 512;
5813
5814 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
5815 s->ofldtxq[i].q.size = 1024;
5816
5817 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
5818 struct sge_ofld_rxq *r = &s->ofldrxq[i];
5819
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05305820 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005821 r->rspq.uld = CXGB4_ULD_ISCSI;
5822 r->fl.size = 72;
5823 }
5824
5825 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
5826 struct sge_ofld_rxq *r = &s->rdmarxq[i];
5827
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05305828 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005829 r->rspq.uld = CXGB4_ULD_RDMA;
5830 r->fl.size = 72;
5831 }
5832
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05305833 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
5834 if (ciq_size > SGE_MAX_IQ_SIZE) {
5835 CH_WARN(adap, "CIQ size too small for available IQs\n");
5836 ciq_size = SGE_MAX_IQ_SIZE;
5837 }
5838
5839 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
5840 struct sge_ofld_rxq *r = &s->rdmaciq[i];
5841
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05305842 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05305843 r->rspq.uld = CXGB4_ULD_RDMA;
5844 }
5845
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05305846 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5847 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005848}
5849
5850/*
5851 * Reduce the number of Ethernet queues across all ports to at most n.
5852 * n provides at least one queue per port.
5853 */
Bill Pemberton91744942012-12-03 09:23:02 -05005854static void reduce_ethqs(struct adapter *adap, int n)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005855{
5856 int i;
5857 struct port_info *pi;
5858
5859 while (n < adap->sge.ethqsets)
5860 for_each_port(adap, i) {
5861 pi = adap2pinfo(adap, i);
5862 if (pi->nqsets > 1) {
5863 pi->nqsets--;
5864 adap->sge.ethqsets--;
5865 if (adap->sge.ethqsets <= n)
5866 break;
5867 }
5868 }
5869
5870 n = 0;
5871 for_each_port(adap, i) {
5872 pi = adap2pinfo(adap, i);
5873 pi->first_qset = n;
5874 n += pi->nqsets;
5875 }
5876}
5877
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

/*
 * Try to switch the adapter to MSI-X.  Asks the PCI core for up to "want"
 * vectors but accepts any count down to the "need" minimum, then shrinks
 * the NIC/offload queue plans to match what was actually granted.
 * Returns 0 on success or a negative errno from pci_enable_msix_range().
 */
static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 3 * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
	/* On success "want" becomes the number of vectors obtained */
	want = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (want < 0)
		return want;

	/*
	 * Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = want - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		/* leftover vectors after NIC gets its share */
		i = want - EXTRA_VECS - s->max_ethqsets;
		i -= ofld_need - nchan;
		s->ofldqsets = (i / nchan) * nchan;	/* round down */
	}
	/* Record the granted vector numbers for request_irq() later */
	for (i = 0; i < want; ++i)
		adap->msix_info[i].vec = entries[i].vector;

	return 0;
}

#undef EXTRA_VECS
5933
Bill Pemberton91744942012-12-03 09:23:02 -05005934static int init_rss(struct adapter *adap)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00005935{
5936 unsigned int i, j;
5937
5938 for_each_port(adap, i) {
5939 struct port_info *pi = adap2pinfo(adap, i);
5940
5941 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5942 if (!pi->rss)
5943 return -ENOMEM;
5944 for (j = 0; j < pi->rss_size; j++)
Ben Hutchings278bc422011-12-15 13:56:49 +00005945 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
Dimitris Michailidis671b0062010-07-11 12:01:17 +00005946 }
5947 return 0;
5948}
5949
Bill Pemberton91744942012-12-03 09:23:02 -05005950static void print_port_info(const struct net_device *dev)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005951{
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005952 char buf[80];
Dimitris Michailidis118969e2010-12-14 21:36:48 +00005953 char *bufp = buf;
Dimitris Michailidisf1a051b2010-05-10 15:58:08 +00005954 const char *spd = "";
Dimitris Michailidis118969e2010-12-14 21:36:48 +00005955 const struct port_info *pi = netdev_priv(dev);
5956 const struct adapter *adap = pi->adapter;
Dimitris Michailidisf1a051b2010-05-10 15:58:08 +00005957
5958 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
5959 spd = " 2.5 GT/s";
5960 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5961 spd = " 5 GT/s";
Roland Dreierd2e752d2014-04-28 17:36:20 -07005962 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
5963 spd = " 8 GT/s";
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005964
Dimitris Michailidis118969e2010-12-14 21:36:48 +00005965 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
5966 bufp += sprintf(bufp, "100/");
5967 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
5968 bufp += sprintf(bufp, "1000/");
5969 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
5970 bufp += sprintf(bufp, "10G/");
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05305971 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
5972 bufp += sprintf(bufp, "40G/");
Dimitris Michailidis118969e2010-12-14 21:36:48 +00005973 if (bufp != buf)
5974 --bufp;
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05305975 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005976
Dimitris Michailidis118969e2010-12-14 21:36:48 +00005977 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
Santosh Rastapur0a57a532013-03-14 05:08:49 +00005978 adap->params.vpd.id,
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05305979 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
Dimitris Michailidis118969e2010-12-14 21:36:48 +00005980 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
5981 (adap->flags & USING_MSIX) ? " MSI-X" :
5982 (adap->flags & USING_MSI) ? " MSI" : "");
Kumar Sanghvia94cd702014-02-18 17:56:09 +05305983 netdev_info(dev, "S/N: %s, P/N: %s\n",
5984 adap->params.vpd.sn, adap->params.vpd.pn);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005985}
5986
Bill Pemberton91744942012-12-03 09:23:02 -05005987static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
Dimitris Michailidisef306b52010-12-14 21:36:44 +00005988{
Jiang Liue5c8ae52012-08-20 13:53:19 -06005989 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
Dimitris Michailidisef306b52010-12-14 21:36:44 +00005990}
5991
Dimitris Michailidis06546392010-07-11 12:01:16 +00005992/*
5993 * Free the following resources:
5994 * - memory used for tables
5995 * - MSI/MSI-X
5996 * - net devices
5997 * - resources FW is holding for us
5998 */
5999static void free_some_resources(struct adapter *adapter)
6000{
6001 unsigned int i;
6002
6003 t4_free_mem(adapter->l2t);
6004 t4_free_mem(adapter->tids.tid_tab);
6005 disable_msi(adapter);
6006
6007 for_each_port(adapter, i)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00006008 if (adapter->port[i]) {
6009 kfree(adap2pinfo(adapter, i)->rss);
Dimitris Michailidis06546392010-07-11 12:01:16 +00006010 free_netdev(adapter->port[i]);
Dimitris Michailidis671b0062010-07-11 12:01:17 +00006011 }
Dimitris Michailidis06546392010-07-11 12:01:16 +00006012 if (adapter->flags & FW_OK)
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00006013 t4_fw_bye(adapter, adapter->fn);
Dimitris Michailidis06546392010-07-11 12:01:16 +00006014}
6015
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00006016#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
Dimitris Michailidis35d35682010-08-02 13:19:20 +00006017#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006018 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006019#define SEGMENT_SIZE 128
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006020
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00006021static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006022{
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006023 int func, i, err, s_qpp, qpp, num_seg;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006024 struct port_info *pi;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006025 bool highdma = false;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006026 struct adapter *adapter = NULL;
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306027 void __iomem *regs;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006028
6029 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
6030
6031 err = pci_request_regions(pdev, KBUILD_MODNAME);
6032 if (err) {
6033 /* Just info, some other driver may have claimed the device. */
6034 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6035 return err;
6036 }
6037
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006038 err = pci_enable_device(pdev);
6039 if (err) {
6040 dev_err(&pdev->dev, "cannot enable PCI device\n");
6041 goto out_release_regions;
6042 }
6043
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306044 regs = pci_ioremap_bar(pdev, 0);
6045 if (!regs) {
6046 dev_err(&pdev->dev, "cannot map device registers\n");
6047 err = -ENOMEM;
6048 goto out_disable_device;
6049 }
6050
Hariprasad Shenai8203b502014-10-09 05:48:47 +05306051 err = t4_wait_dev_ready(regs);
6052 if (err < 0)
6053 goto out_unmap_bar0;
6054
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306055 /* We control everything through one PF */
Hariprasad Shenai0d804332015-01-05 16:30:47 +05306056 func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306057 if (func != ent->driver_data) {
6058 iounmap(regs);
6059 pci_disable_device(pdev);
6060 pci_save_state(pdev); /* to restore SR-IOV later */
6061 goto sriov;
6062 }
6063
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006064 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006065 highdma = true;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006066 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6067 if (err) {
6068 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6069 "coherent allocations\n");
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306070 goto out_unmap_bar0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006071 }
6072 } else {
6073 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6074 if (err) {
6075 dev_err(&pdev->dev, "no usable DMA configuration\n");
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306076 goto out_unmap_bar0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006077 }
6078 }
6079
6080 pci_enable_pcie_error_reporting(pdev);
Dimitris Michailidisef306b52010-12-14 21:36:44 +00006081 enable_pcie_relaxed_ordering(pdev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006082 pci_set_master(pdev);
6083 pci_save_state(pdev);
6084
6085 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6086 if (!adapter) {
6087 err = -ENOMEM;
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306088 goto out_unmap_bar0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006089 }
6090
Anish Bhatt29aaee62014-08-20 13:44:06 -07006091 adapter->workq = create_singlethread_workqueue("cxgb4");
6092 if (!adapter->workq) {
6093 err = -ENOMEM;
6094 goto out_free_adapter;
6095 }
6096
Gavin Shan144be3d2014-01-23 12:27:34 +08006097 /* PCI device has been enabled */
6098 adapter->flags |= DEV_ENABLED;
6099
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306100 adapter->regs = regs;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006101 adapter->pdev = pdev;
6102 adapter->pdev_dev = &pdev->dev;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05306103 adapter->mbox = func;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00006104 adapter->fn = func;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006105 adapter->msg_enable = dflt_msg_enable;
6106 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6107
6108 spin_lock_init(&adapter->stats_lock);
6109 spin_lock_init(&adapter->tid_release_lock);
Anish Bhatte327c222014-10-29 17:54:03 -07006110 spin_lock_init(&adapter->win0_lock);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006111
6112 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
Vipul Pandya881806b2012-05-18 15:29:24 +05306113 INIT_WORK(&adapter->db_full_task, process_db_full);
6114 INIT_WORK(&adapter->db_drop_task, process_db_drop);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006115
6116 err = t4_prep_adapter(adapter);
6117 if (err)
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306118 goto out_free_adapter;
6119
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006120
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05306121 if (!is_t4(adapter->params.chip)) {
Hariprasad Shenaif612b812015-01-05 16:30:43 +05306122 s_qpp = (QUEUESPERPAGEPF0_S +
6123 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
6124 adapter->fn);
6125 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
6126 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006127 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6128
6129 /* Each segment size is 128B. Write coalescing is enabled only
6130 * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
6131 * queue is less no of segments that can be accommodated in
6132 * a page size.
6133 */
6134 if (qpp > num_seg) {
6135 dev_err(&pdev->dev,
6136 "Incorrect number of egress queues per page\n");
6137 err = -EINVAL;
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306138 goto out_free_adapter;
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006139 }
6140 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6141 pci_resource_len(pdev, 2));
6142 if (!adapter->bar2) {
6143 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6144 err = -ENOMEM;
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306145 goto out_free_adapter;
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006146 }
6147 }
6148
Vipul Pandya636f9d32012-09-26 02:39:39 +00006149 setup_memwin(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006150 err = adap_init0(adapter);
Vipul Pandya636f9d32012-09-26 02:39:39 +00006151 setup_memwin_rdma(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006152 if (err)
6153 goto out_unmap_bar;
6154
6155 for_each_port(adapter, i) {
6156 struct net_device *netdev;
6157
6158 netdev = alloc_etherdev_mq(sizeof(struct port_info),
6159 MAX_ETH_QSETS);
6160 if (!netdev) {
6161 err = -ENOMEM;
6162 goto out_free_dev;
6163 }
6164
6165 SET_NETDEV_DEV(netdev, &pdev->dev);
6166
6167 adapter->port[i] = netdev;
6168 pi = netdev_priv(netdev);
6169 pi->adapter = adapter;
6170 pi->xact_addr_filt = -1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006171 pi->port_id = i;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006172 netdev->irq = pdev->irq;
6173
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00006174 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6175 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6176 NETIF_F_RXCSUM | NETIF_F_RXHASH |
Patrick McHardyf6469682013-04-19 02:04:27 +00006177 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006178 if (highdma)
6179 netdev->hw_features |= NETIF_F_HIGHDMA;
6180 netdev->features |= netdev->hw_features;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006181 netdev->vlan_features = netdev->features & VLAN_FEAT;
6182
Jiri Pirko01789342011-08-16 06:29:00 +00006183 netdev->priv_flags |= IFF_UNICAST_FLT;
6184
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006185 netdev->netdev_ops = &cxgb4_netdev_ops;
Anish Bhatt688848b2014-06-19 21:37:13 -07006186#ifdef CONFIG_CHELSIO_T4_DCB
6187 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6188 cxgb4_dcb_state_init(netdev);
6189#endif
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00006190 netdev->ethtool_ops = &cxgb_ethtool_ops;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006191 }
6192
6193 pci_set_drvdata(pdev, adapter);
6194
6195 if (adapter->flags & FW_OK) {
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00006196 err = t4_port_init(adapter, func, func, 0);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006197 if (err)
6198 goto out_free_dev;
6199 }
6200
6201 /*
6202 * Configure queues and allocate tables now, they can be needed as
6203 * soon as the first register_netdev completes.
6204 */
6205 cfg_queues(adapter);
6206
6207 adapter->l2t = t4_init_l2t();
6208 if (!adapter->l2t) {
6209 /* We tolerate a lack of L2T, giving up some functionality */
6210 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6211 adapter->params.offload = 0;
6212 }
6213
6214 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6215 dev_warn(&pdev->dev, "could not allocate TID table, "
6216 "continuing\n");
6217 adapter->params.offload = 0;
6218 }
6219
Dimitris Michailidisf7cabcd2010-07-11 12:01:15 +00006220 /* See what interrupts we'll be using */
6221 if (msi > 1 && enable_msix(adapter) == 0)
6222 adapter->flags |= USING_MSIX;
6223 else if (msi > 0 && pci_enable_msi(pdev) == 0)
6224 adapter->flags |= USING_MSI;
6225
Dimitris Michailidis671b0062010-07-11 12:01:17 +00006226 err = init_rss(adapter);
6227 if (err)
6228 goto out_free_dev;
6229
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006230 /*
6231 * The card is now ready to go. If any errors occur during device
6232 * registration we do not fail the whole card but rather proceed only
6233 * with the ports we manage to register successfully. However we must
6234 * register at least one net device.
6235 */
6236 for_each_port(adapter, i) {
Dimitris Michailidisa57cabe2010-12-14 21:36:46 +00006237 pi = adap2pinfo(adapter, i);
6238 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6239 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6240
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006241 err = register_netdev(adapter->port[i]);
6242 if (err)
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +00006243 break;
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +00006244 adapter->chan_map[pi->tx_chan] = i;
6245 print_port_info(adapter->port[i]);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006246 }
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +00006247 if (i == 0) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006248 dev_err(&pdev->dev, "could not register any net devices\n");
6249 goto out_free_dev;
6250 }
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +00006251 if (err) {
6252 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6253 err = 0;
Joe Perches6403eab2011-06-03 11:51:20 +00006254 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006255
6256 if (cxgb4_debugfs_root) {
6257 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6258 cxgb4_debugfs_root);
6259 setup_debugfs(adapter);
6260 }
6261
David S. Miller88c51002011-10-07 13:38:43 -04006262 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6263 pdev->needs_freset = 1;
6264
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006265 if (is_offload(adapter))
6266 attach_ulds(adapter);
6267
Hariprasad Shenai8e1e6052014-08-06 17:10:59 +05306268sriov:
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006269#ifdef CONFIG_PCI_IOV
Santosh Rastapur7d6727c2013-03-14 05:08:56 +00006270 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006271 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6272 dev_info(&pdev->dev,
6273 "instantiated %u virtual functions\n",
6274 num_vf[func]);
6275#endif
6276 return 0;
6277
6278 out_free_dev:
Dimitris Michailidis06546392010-07-11 12:01:16 +00006279 free_some_resources(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006280 out_unmap_bar:
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05306281 if (!is_t4(adapter->params.chip))
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006282 iounmap(adapter->bar2);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006283 out_free_adapter:
Anish Bhatt29aaee62014-08-20 13:44:06 -07006284 if (adapter->workq)
6285 destroy_workqueue(adapter->workq);
6286
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006287 kfree(adapter);
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306288 out_unmap_bar0:
6289 iounmap(regs);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006290 out_disable_device:
6291 pci_disable_pcie_error_reporting(pdev);
6292 pci_disable_device(pdev);
6293 out_release_regions:
6294 pci_release_regions(pdev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006295 return err;
6296}
6297
Bill Pemberton91744942012-12-03 09:23:02 -05006298static void remove_one(struct pci_dev *pdev)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006299{
6300 struct adapter *adapter = pci_get_drvdata(pdev);
6301
Vipul Pandya636f9d32012-09-26 02:39:39 +00006302#ifdef CONFIG_PCI_IOV
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006303 pci_disable_sriov(pdev);
6304
Vipul Pandya636f9d32012-09-26 02:39:39 +00006305#endif
6306
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006307 if (adapter) {
6308 int i;
6309
Anish Bhatt29aaee62014-08-20 13:44:06 -07006310 /* Tear down per-adapter Work Queue first since it can contain
6311 * references to our adapter data structure.
6312 */
6313 destroy_workqueue(adapter->workq);
6314
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006315 if (is_offload(adapter))
6316 detach_ulds(adapter);
6317
6318 for_each_port(adapter, i)
Dimitris Michailidis8f3a7672010-12-14 21:36:52 +00006319 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006320 unregister_netdev(adapter->port[i]);
6321
Fabian Frederick9f16dc22014-06-27 22:51:52 +02006322 debugfs_remove_recursive(adapter->debugfs_root);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006323
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00006324 /* If we allocated filters, free up state associated with any
6325 * valid filters ...
6326 */
6327 if (adapter->tids.ftid_tab) {
6328 struct filter_entry *f = &adapter->tids.ftid_tab[0];
Vipul Pandyadca4fae2012-12-10 09:30:53 +00006329 for (i = 0; i < (adapter->tids.nftids +
6330 adapter->tids.nsftids); i++, f++)
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00006331 if (f->valid)
6332 clear_filter(adapter, f);
6333 }
6334
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00006335 if (adapter->flags & FULL_INIT_DONE)
6336 cxgb_down(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006337
Dimitris Michailidis06546392010-07-11 12:01:16 +00006338 free_some_resources(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006339 iounmap(adapter->regs);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05306340 if (!is_t4(adapter->params.chip))
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006341 iounmap(adapter->bar2);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006342 pci_disable_pcie_error_reporting(pdev);
Gavin Shan144be3d2014-01-23 12:27:34 +08006343 if ((adapter->flags & DEV_ENABLED)) {
6344 pci_disable_device(pdev);
6345 adapter->flags &= ~DEV_ENABLED;
6346 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006347 pci_release_regions(pdev);
Li RongQingee9a33b2014-06-20 17:32:36 +08006348 synchronize_rcu();
Gavin Shan8b662fe2014-01-24 17:12:03 +08006349 kfree(adapter);
Dimitris Michailidisa069ec92010-09-30 09:17:12 +00006350 } else
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006351 pci_release_regions(pdev);
6352}
6353
/* PCI driver glue: .shutdown reuses remove_one, EEH recovery uses cxgb4_eeh */
static struct pci_driver cxgb4_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};
6362
6363static int __init cxgb4_init_module(void)
6364{
6365 int ret;
6366
6367 /* Debugfs support is optional, just warn if this fails */
6368 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6369 if (!cxgb4_debugfs_root)
Joe Perches428ac432013-01-06 13:34:49 +00006370 pr_warn("could not create debugfs entry, continuing\n");
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006371
6372 ret = pci_register_driver(&cxgb4_driver);
Anish Bhatt29aaee62014-08-20 13:44:06 -07006373 if (ret < 0)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006374 debugfs_remove(cxgb4_debugfs_root);
Vipul Pandya01bcca62013-07-04 16:10:46 +05306375
Anish Bhatt1bb60372014-10-14 20:07:22 -07006376#if IS_ENABLED(CONFIG_IPV6)
Vipul Pandya01bcca62013-07-04 16:10:46 +05306377 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
Anish Bhatt1bb60372014-10-14 20:07:22 -07006378#endif
Vipul Pandya01bcca62013-07-04 16:10:46 +05306379
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006380 return ret;
6381}
6382
/*
 * Module exit: undo cxgb4_init_module() in reverse order — IPv6 address
 * notifier first, then the PCI driver, then the debugfs root.
 */
static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}
6391
6392module_init(cxgb4_init_module);
6393module_exit(cxgb4_cleanup_module);