/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_dcb.h"
#include "l2t.h"

#include <../drivers/net/bonding/bonding.h>

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
	/*
	 * Physical Function provisioning constants.
	 */
	PFRES_NVI = 4,			/* # of Virtual Interfaces */
	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr
					 */
	PFRES_NEQ = 256,		/* # of egress queues */
	PFRES_NIQ = 0,			/* # of ingress queues */
	PFRES_TC = 0,			/* PCI-E traffic class */
	PFRES_NEXACTF = 128,		/* # of exact MPS filters */

	PFRES_R_CAPS = FW_CMD_CAP_PF,
	PFRES_WX_CAPS = FW_CMD_CAP_PF,

#ifdef CONFIG_PCI_IOV
	/*
	 * Virtual Function provisioning constants.  We need two extra Ingress
	 * Queues with Interrupt capability to serve as the VF's Firmware
	 * Event Queue and Forwarded Interrupt Queue (when using MSI mode);
	 * neither will have Free Lists associated with them.  For each
	 * Ethernet/Control Egress Queue and for each Free List, we need an
	 * Egress Context.
	 */
	VFRES_NPORTS = 1,		/* # of "ports" per VF */
	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */

	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
	VFRES_TC = 0,			/* PCI-E traffic class */
	VFRES_NEXACTF = 16,		/* # of exact MPS filters */

	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
#endif
};

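/*
 * Putting the numbers above together: with VFRES_NQSETS = 2, each VF gets
 * VFRES_NIQFLINT = 4 interrupt-capable ingress queues (two Queue Sets plus
 * the Firmware Event Queue and the Forwarded Interrupt Queue) and
 * VFRES_NEQ = 4 egress queues.
 */
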
/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
{
	unsigned int portn, portvec;

	/*
	 * Give PFs access to all of the ports.
	 */
	if (vf == 0)
		return FW_PFVF_CMD_PMASK_MASK;

	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF.  We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet, so we have to compute this.
	 */
	if (adapter->params.nports == 0)
		return 0;

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;
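	/*
	 * Worked example: with portvec = 0x5 (ports 0 and 2 active) and
	 * portn = 1, the first pass below computes 0x5 ^ (0x5 & 0x4) = 0x1,
	 * the lowest set bit.  That isn't the port we want, so the bit is
	 * cleared (portvec becomes 0x4) and portn drops to 0; the second
	 * pass isolates and returns 0x4, the pmask for port 2.
	 */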
	for (;;) {
		/*
		 * Isolate the lowest set bit in the port vector.  If we're at
		 * the port number that we want, return that as the pmask.
		 * Otherwise mask that bit out of the port vector and
		 * decrement our port number ...
		 */
		unsigned int pmask = portvec ^ (portvec & (portvec-1));
		if (portn == 0)
			return pmask;
		portn--;
		portvec &= ~pmask;
	}
	/*NOTREACHED*/
}

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static const struct pci_device_id cxgb4_pci_tbl[] = {
	CH_DEVICE(0xa000, 0),  /* PE10K */
	CH_DEVICE(0x4001, -1),
	CH_DEVICE(0x4002, -1),
	CH_DEVICE(0x4003, -1),
	CH_DEVICE(0x4004, -1),
	CH_DEVICE(0x4005, -1),
	CH_DEVICE(0x4006, -1),
	CH_DEVICE(0x4007, -1),
	CH_DEVICE(0x4008, -1),
	CH_DEVICE(0x4009, -1),
	CH_DEVICE(0x400a, -1),
	CH_DEVICE(0x400d, -1),
	CH_DEVICE(0x400e, -1),
	CH_DEVICE(0x4080, -1),
	CH_DEVICE(0x4081, -1),
	CH_DEVICE(0x4082, -1),
	CH_DEVICE(0x4083, -1),
	CH_DEVICE(0x4084, -1),
	CH_DEVICE(0x4085, -1),
	CH_DEVICE(0x4086, -1),
	CH_DEVICE(0x4087, -1),
	CH_DEVICE(0x4088, -1),
	CH_DEVICE(0x4401, 4),
	CH_DEVICE(0x4402, 4),
	CH_DEVICE(0x4403, 4),
	CH_DEVICE(0x4404, 4),
	CH_DEVICE(0x4405, 4),
	CH_DEVICE(0x4406, 4),
	CH_DEVICE(0x4407, 4),
	CH_DEVICE(0x4408, 4),
	CH_DEVICE(0x4409, 4),
	CH_DEVICE(0x440a, 4),
	CH_DEVICE(0x440d, 4),
	CH_DEVICE(0x440e, 4),
	CH_DEVICE(0x4480, 4),
	CH_DEVICE(0x4481, 4),
	CH_DEVICE(0x4482, 4),
	CH_DEVICE(0x4483, 4),
	CH_DEVICE(0x4484, 4),
	CH_DEVICE(0x4485, 4),
	CH_DEVICE(0x4486, 4),
	CH_DEVICE(0x4487, 4),
	CH_DEVICE(0x4488, 4),
	CH_DEVICE(0x5001, 4),
	CH_DEVICE(0x5002, 4),
	CH_DEVICE(0x5003, 4),
	CH_DEVICE(0x5004, 4),
	CH_DEVICE(0x5005, 4),
	CH_DEVICE(0x5006, 4),
	CH_DEVICE(0x5007, 4),
	CH_DEVICE(0x5008, 4),
	CH_DEVICE(0x5009, 4),
	CH_DEVICE(0x500A, 4),
	CH_DEVICE(0x500B, 4),
	CH_DEVICE(0x500C, 4),
	CH_DEVICE(0x500D, 4),
	CH_DEVICE(0x500E, 4),
	CH_DEVICE(0x500F, 4),
	CH_DEVICE(0x5010, 4),
	CH_DEVICE(0x5011, 4),
	CH_DEVICE(0x5012, 4),
	CH_DEVICE(0x5013, 4),
	CH_DEVICE(0x5014, 4),
	CH_DEVICE(0x5015, 4),
	CH_DEVICE(0x5080, 4),
	CH_DEVICE(0x5081, 4),
	CH_DEVICE(0x5082, 4),
	CH_DEVICE(0x5083, 4),
	CH_DEVICE(0x5084, 4),
	CH_DEVICE(0x5085, 4),
	CH_DEVICE(0x5086, 4),
	CH_DEVICE(0x5087, 4),
	CH_DEVICE(0x5088, 4),
	CH_DEVICE(0x5401, 4),
	CH_DEVICE(0x5402, 4),
	CH_DEVICE(0x5403, 4),
	CH_DEVICE(0x5404, 4),
	CH_DEVICE(0x5405, 4),
	CH_DEVICE(0x5406, 4),
	CH_DEVICE(0x5407, 4),
	CH_DEVICE(0x5408, 4),
	CH_DEVICE(0x5409, 4),
	CH_DEVICE(0x540A, 4),
	CH_DEVICE(0x540B, 4),
	CH_DEVICE(0x540C, 4),
	CH_DEVICE(0x540D, 4),
	CH_DEVICE(0x540E, 4),
	CH_DEVICE(0x540F, 4),
	CH_DEVICE(0x5410, 4),
	CH_DEVICE(0x5411, 4),
	CH_DEVICE(0x5412, 4),
	CH_DEVICE(0x5413, 4),
	CH_DEVICE(0x5414, 4),
	CH_DEVICE(0x5415, 4),
	CH_DEVICE(0x5480, 4),
	CH_DEVICE(0x5481, 4),
	CH_DEVICE(0x5482, 4),
	CH_DEVICE(0x5483, 4),
	CH_DEVICE(0x5484, 4),
	CH_DEVICE(0x5485, 4),
	CH_DEVICE(0x5486, 4),
	CH_DEVICE(0x5487, 4),
	CH_DEVICE(0x5488, 4),
	{ 0, }
};

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

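/*
 * With the standard 14-byte Ethernet header, that 2-byte offset places the
 * start of the IP header at byte 16 of the buffer, i.e. on a 4-byte
 * boundary.
 */
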
static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be
 * instantiated on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/* TX Queue select: determines which algorithm is used to pick a TX queue.
 * Select between the kernel-provided function (select_queue=0) or the
 * driver's cxgb_select_queue() function (select_queue=1).
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");

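/*
 * The default selection above budgets IP Fragment (1) + MPS Match Type (3) +
 * IP Protocol (8) + [Inner] VLAN (17) + Port (3) + FCoE (1) = 33 of the 36
 * available bits.
 */
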
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 10000:
			s = "10Gbps";
			break;
		case 1000:
			s = "1000Mbps";
			break;
		case 100:
			s = "100Mbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
					    &name, &value);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			cxgb4_dcb_state_init(dev);
			dcb_tx_queue_prio_enable(dev, false);
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}

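/*
 * Note that set_addr_filters() hands addresses to t4_alloc_mac_filt() in
 * batches of up to ARRAY_SIZE(addr) == 7, and that "free" is true only for
 * the first batch, so any previously programmed filters are released
 * exactly once.
 */
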
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);

	return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[port];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = GET_TCB_COOKIE(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_GET(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev = q->adap->port[port];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

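/*
 * In the statistics kept by uldrx_handler() above, a NULL gather list means
 * the message arrived as immediate data (imm), CXGB4_MSG_AN marks an
 * asynchronous notification (an), and everything else is counted as an
 * ordinary offload packet (pkts).
 */
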
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	if (v & PFSW) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);

	for_each_rdmaciq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
			 adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
	for_each_rdmaciq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}

/**
 *	write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}

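/*
 * The table handed to the firmware above holds absolute ingress queue IDs,
 * so a caller-supplied index of 0 becomes
 * ethrxq[pi->first_qset].rspq.abs_id.
 */
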
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_rdmaciq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmaciq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_ciq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL :
				MPS_T5_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}

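/*
 * With MSI-X, vector 0 carries the non-queue interrupts and vector 1 the
 * firmware event queue, so msi_idx above starts at 1 and is bumped before
 * each queue allocation; request_msix_queue_irqs() mirrors this by starting
 * its msi_index at 2.
 */
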
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL)
			return -EAGAIN;
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					 f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(V_FW_FILTER_WR_TID(ftid) |
		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		      V_FW_FILTER_WR_NOREPLY(0) |
		      V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(V_FW_FILTER_WR_RX_CHAN(0) |
		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}

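/*
 * The "pending" flag set by set_filter_wr() stays up until filter_rpl()
 * sees the matching CPL_SET_TCB_RPL from the firmware, at which point the
 * filter is either marked valid or cleared.
 */
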
/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}

Anish Bhatt688848b2014-06-19 21:37:13 -07001436static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1437 void *accel_priv, select_queue_fallback_t fallback)
1438{
1439 int txq;
1440
1441#ifdef CONFIG_CHELSIO_T4_DCB
1442	/* If Data Center Bridging has been successfully negotiated on this
1443 * link then we'll use the skb's priority to map it to a TX Queue.
1444 * The skb's priority is determined via the VLAN Tag Priority Code
1445 * Point field.
1446 */
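	/* Worked example: a frame tagged with VLAN TCI 0x6005 carries
	 * PCP 3 ((0x6005 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT), so the
	 * code below steers it to TX queue 3.
	 */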
1447 if (cxgb4_dcb_enabled(dev)) {
1448 u16 vlan_tci;
1449 int err;
1450
1451 err = vlan_get_tag(skb, &vlan_tci);
1452 if (unlikely(err)) {
1453 if (net_ratelimit())
1454 netdev_warn(dev,
1455 "TX Packet without VLAN Tag on DCB Link\n");
1456 txq = 0;
1457 } else {
1458 txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1459 }
1460 return txq;
1461 }
1462#endif /* CONFIG_CHELSIO_T4_DCB */
1463
1464 if (select_queue) {
1465 txq = (skb_rx_queue_recorded(skb)
1466 ? skb_get_rx_queue(skb)
1467 : smp_processor_id());
1468
1469 while (unlikely(txq >= dev->real_num_tx_queues))
1470 txq -= dev->real_num_tx_queues;
1471
1472 return txq;
1473 }
1474
1475 return fallback(dev, skb) % dev->real_num_tx_queues;
1476}
1477
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001478static inline int is_offload(const struct adapter *adap)
1479{
1480 return adap->params.offload;
1481}
1482
1483/*
1484 * Implementation of ethtool operations.
1485 */
1486
1487static u32 get_msglevel(struct net_device *dev)
1488{
1489 return netdev2adap(dev)->msg_enable;
1490}
1491
1492static void set_msglevel(struct net_device *dev, u32 val)
1493{
1494 netdev2adap(dev)->msg_enable = val;
1495}
1496
1497static char stats_strings[][ETH_GSTRING_LEN] = {
1498 "TxOctetsOK ",
1499 "TxFramesOK ",
1500 "TxBroadcastFrames ",
1501 "TxMulticastFrames ",
1502 "TxUnicastFrames ",
1503 "TxErrorFrames ",
1504
1505 "TxFrames64 ",
1506 "TxFrames65To127 ",
1507 "TxFrames128To255 ",
1508 "TxFrames256To511 ",
1509 "TxFrames512To1023 ",
1510 "TxFrames1024To1518 ",
1511 "TxFrames1519ToMax ",
1512
1513 "TxFramesDropped ",
1514 "TxPauseFrames ",
1515 "TxPPP0Frames ",
1516 "TxPPP1Frames ",
1517 "TxPPP2Frames ",
1518 "TxPPP3Frames ",
1519 "TxPPP4Frames ",
1520 "TxPPP5Frames ",
1521 "TxPPP6Frames ",
1522 "TxPPP7Frames ",
1523
1524 "RxOctetsOK ",
1525 "RxFramesOK ",
1526 "RxBroadcastFrames ",
1527 "RxMulticastFrames ",
1528 "RxUnicastFrames ",
1529
1530 "RxFramesTooLong ",
1531 "RxJabberErrors ",
1532 "RxFCSErrors ",
1533 "RxLengthErrors ",
1534 "RxSymbolErrors ",
1535 "RxRuntFrames ",
1536
1537 "RxFrames64 ",
1538 "RxFrames65To127 ",
1539 "RxFrames128To255 ",
1540 "RxFrames256To511 ",
1541 "RxFrames512To1023 ",
1542 "RxFrames1024To1518 ",
1543 "RxFrames1519ToMax ",
1544
1545 "RxPauseFrames ",
1546 "RxPPP0Frames ",
1547 "RxPPP1Frames ",
1548 "RxPPP2Frames ",
1549 "RxPPP3Frames ",
1550 "RxPPP4Frames ",
1551 "RxPPP5Frames ",
1552 "RxPPP6Frames ",
1553 "RxPPP7Frames ",
1554
1555 "RxBG0FramesDropped ",
1556 "RxBG1FramesDropped ",
1557 "RxBG2FramesDropped ",
1558 "RxBG3FramesDropped ",
1559 "RxBG0FramesTrunc ",
1560 "RxBG1FramesTrunc ",
1561 "RxBG2FramesTrunc ",
1562 "RxBG3FramesTrunc ",
1563
1564 "TSO ",
1565 "TxCsumOffload ",
1566 "RxCsumGood ",
1567 "VLANextractions ",
1568 "VLANinsertions ",
Dimitris Michailidis4a6346d2010-05-10 15:58:09 +00001569 "GROpackets ",
1570 "GROmerged ",
Santosh Rastapur22adfe02013-03-14 05:08:51 +00001571 "WriteCoalSuccess ",
1572 "WriteCoalFail ",
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001573};
1574
1575static int get_sset_count(struct net_device *dev, int sset)
1576{
1577 switch (sset) {
1578 case ETH_SS_STATS:
1579 return ARRAY_SIZE(stats_strings);
1580 default:
1581 return -EOPNOTSUPP;
1582 }
1583}
1584
1585#define T4_REGMAP_SIZE (160 * 1024)
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001586#define T5_REGMAP_SIZE (332 * 1024)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001587
1588static int get_regs_len(struct net_device *dev)
1589{
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001590 struct adapter *adap = netdev2adap(dev);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301591 if (is_t4(adap->params.chip))
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001592 return T4_REGMAP_SIZE;
1593 else
1594 return T5_REGMAP_SIZE;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001595}
1596
1597static int get_eeprom_len(struct net_device *dev)
1598{
1599 return EEPROMSIZE;
1600}
1601
1602static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1603{
1604 struct adapter *adapter = netdev2adap(dev);
1605
Rick Jones23020ab2011-11-09 09:58:07 +00001606 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1607 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1608 strlcpy(info->bus_info, pci_name(adapter->pdev),
1609 sizeof(info->bus_info));
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001610
Rick Jones84b40502011-11-21 10:54:05 +00001611 if (adapter->params.fw_vers)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001612 snprintf(info->fw_version, sizeof(info->fw_version),
1613 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1614 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1615 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1616 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1617 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1618 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1619 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1620 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1621 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1622}
1623
1624static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1625{
1626 if (stringset == ETH_SS_STATS)
1627 memcpy(data, stats_strings, sizeof(stats_strings));
1628}
1629
1630/*
1631 * port stats maintained per queue of the port. They should be in the same
1632 * order as in stats_strings above.
1633 */
1634struct queue_port_stats {
1635 u64 tso;
1636 u64 tx_csum;
1637 u64 rx_csum;
1638 u64 vlan_ex;
1639 u64 vlan_ins;
Dimitris Michailidis4a6346d2010-05-10 15:58:09 +00001640 u64 gro_pkts;
1641 u64 gro_merged;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001642};
1643
1644static void collect_sge_port_stats(const struct adapter *adap,
1645 const struct port_info *p, struct queue_port_stats *s)
1646{
1647 int i;
1648 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1649 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1650
1651 memset(s, 0, sizeof(*s));
1652 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1653 s->tso += tx->tso;
1654 s->tx_csum += tx->tx_cso;
1655 s->rx_csum += rx->stats.rx_cso;
1656 s->vlan_ex += rx->stats.vlan_ex;
1657 s->vlan_ins += tx->vlan_ins;
Dimitris Michailidis4a6346d2010-05-10 15:58:09 +00001658 s->gro_pkts += rx->stats.lro_pkts;
1659 s->gro_merged += rx->stats.lro_merged;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001660 }
1661}
1662
1663static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1664 u64 *data)
1665{
1666 struct port_info *pi = netdev_priv(dev);
1667 struct adapter *adapter = pi->adapter;
Santosh Rastapur22adfe02013-03-14 05:08:51 +00001668 u32 val1, val2;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001669
1670 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1671
1672 data += sizeof(struct port_stats) / sizeof(u64);
1673 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
Santosh Rastapur22adfe02013-03-14 05:08:51 +00001674 data += sizeof(struct queue_port_stats) / sizeof(u64);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301675 if (!is_t4(adapter->params.chip)) {
Santosh Rastapur22adfe02013-03-14 05:08:51 +00001676 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1677 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1678 val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1679 *data = val1 - val2;
1680 data++;
1681 *data = val2;
1682 data++;
1683 } else {
1684 memset(data, 0, 2 * sizeof(u64));
1685		data += 2;	/* advance past the two zeroed counters */
1686 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001687}
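/* Note the buffer layout contract in get_stats(): a struct port_stats,
 * then a struct queue_port_stats, then the two write-coalescing counters,
 * matching the order of stats_strings[] exactly.
 */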
1688
1689/*
1690 * Return a version number to identify the type of adapter. The scheme is:
1691 * - bits 0..9: chip version
1692 * - bits 10..15: chip revision
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001693 * - bits 16..23: register dump version
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001694 */
1695static inline unsigned int mk_adap_vers(const struct adapter *ap)
1696{
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301697 return CHELSIO_CHIP_VERSION(ap->params.chip) |
1698 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001699}
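/* Example: chip version 4, revision 1 and register dump version 1 pack
 * to 0x4 | (0x1 << 10) | (0x1 << 16) = 0x10404.
 */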
1700
1701static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1702 unsigned int end)
1703{
1704 u32 *p = buf + start;
1705
1706 for ( ; start <= end; start += sizeof(u32))
1707 *p++ = t4_read_reg(ap, start);
1708}
1709
1710static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1711 void *buf)
1712{
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001713 static const unsigned int t4_reg_ranges[] = {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001714 0x1008, 0x1108,
1715 0x1180, 0x11b4,
1716 0x11fc, 0x123c,
1717 0x1300, 0x173c,
1718 0x1800, 0x18fc,
1719 0x3000, 0x30d8,
1720 0x30e0, 0x5924,
1721 0x5960, 0x59d4,
1722 0x5a00, 0x5af8,
1723 0x6000, 0x6098,
1724 0x6100, 0x6150,
1725 0x6200, 0x6208,
1726 0x6240, 0x6248,
1727 0x6280, 0x6338,
1728 0x6370, 0x638c,
1729 0x6400, 0x643c,
1730 0x6500, 0x6524,
1731 0x6a00, 0x6a38,
1732 0x6a60, 0x6a78,
1733 0x6b00, 0x6b84,
1734 0x6bf0, 0x6c84,
1735 0x6cf0, 0x6d84,
1736 0x6df0, 0x6e84,
1737 0x6ef0, 0x6f84,
1738 0x6ff0, 0x7084,
1739 0x70f0, 0x7184,
1740 0x71f0, 0x7284,
1741 0x72f0, 0x7384,
1742 0x73f0, 0x7450,
1743 0x7500, 0x7530,
1744 0x7600, 0x761c,
1745 0x7680, 0x76cc,
1746 0x7700, 0x7798,
1747 0x77c0, 0x77fc,
1748 0x7900, 0x79fc,
1749 0x7b00, 0x7c38,
1750 0x7d00, 0x7efc,
1751 0x8dc0, 0x8e1c,
1752 0x8e30, 0x8e78,
1753 0x8ea0, 0x8f6c,
1754 0x8fc0, 0x9074,
1755 0x90fc, 0x90fc,
1756 0x9400, 0x9458,
1757 0x9600, 0x96bc,
1758 0x9800, 0x9808,
1759 0x9820, 0x983c,
1760 0x9850, 0x9864,
1761 0x9c00, 0x9c6c,
1762 0x9c80, 0x9cec,
1763 0x9d00, 0x9d6c,
1764 0x9d80, 0x9dec,
1765 0x9e00, 0x9e6c,
1766 0x9e80, 0x9eec,
1767 0x9f00, 0x9f6c,
1768 0x9f80, 0x9fec,
1769 0xd004, 0xd03c,
1770 0xdfc0, 0xdfe0,
1771 0xe000, 0xea7c,
Hariprasad Shenai3d9103f2014-09-01 19:54:59 +05301772 0xf000, 0x11110,
1773 0x11118, 0x11190,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001774 0x19040, 0x1906c,
1775 0x19078, 0x19080,
1776 0x1908c, 0x19124,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001777 0x19150, 0x191b0,
1778 0x191d0, 0x191e8,
1779 0x19238, 0x1924c,
1780 0x193f8, 0x19474,
1781 0x19490, 0x194f8,
1782 0x19800, 0x19f30,
1783 0x1a000, 0x1a06c,
1784 0x1a0b0, 0x1a120,
1785 0x1a128, 0x1a138,
1786 0x1a190, 0x1a1c4,
1787 0x1a1fc, 0x1a1fc,
1788 0x1e040, 0x1e04c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001789 0x1e284, 0x1e28c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001790 0x1e2c0, 0x1e2c0,
1791 0x1e2e0, 0x1e2e0,
1792 0x1e300, 0x1e384,
1793 0x1e3c0, 0x1e3c8,
1794 0x1e440, 0x1e44c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001795 0x1e684, 0x1e68c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001796 0x1e6c0, 0x1e6c0,
1797 0x1e6e0, 0x1e6e0,
1798 0x1e700, 0x1e784,
1799 0x1e7c0, 0x1e7c8,
1800 0x1e840, 0x1e84c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001801 0x1ea84, 0x1ea8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001802 0x1eac0, 0x1eac0,
1803 0x1eae0, 0x1eae0,
1804 0x1eb00, 0x1eb84,
1805 0x1ebc0, 0x1ebc8,
1806 0x1ec40, 0x1ec4c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001807 0x1ee84, 0x1ee8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001808 0x1eec0, 0x1eec0,
1809 0x1eee0, 0x1eee0,
1810 0x1ef00, 0x1ef84,
1811 0x1efc0, 0x1efc8,
1812 0x1f040, 0x1f04c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001813 0x1f284, 0x1f28c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001814 0x1f2c0, 0x1f2c0,
1815 0x1f2e0, 0x1f2e0,
1816 0x1f300, 0x1f384,
1817 0x1f3c0, 0x1f3c8,
1818 0x1f440, 0x1f44c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001819 0x1f684, 0x1f68c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001820 0x1f6c0, 0x1f6c0,
1821 0x1f6e0, 0x1f6e0,
1822 0x1f700, 0x1f784,
1823 0x1f7c0, 0x1f7c8,
1824 0x1f840, 0x1f84c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001825 0x1fa84, 0x1fa8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001826 0x1fac0, 0x1fac0,
1827 0x1fae0, 0x1fae0,
1828 0x1fb00, 0x1fb84,
1829 0x1fbc0, 0x1fbc8,
1830 0x1fc40, 0x1fc4c,
Dimitris Michailidis835bb602010-07-11 17:33:48 -07001831 0x1fe84, 0x1fe8c,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001832 0x1fec0, 0x1fec0,
1833 0x1fee0, 0x1fee0,
1834 0x1ff00, 0x1ff84,
1835 0x1ffc0, 0x1ffc8,
1836 0x20000, 0x2002c,
1837 0x20100, 0x2013c,
1838 0x20190, 0x201c8,
1839 0x20200, 0x20318,
1840 0x20400, 0x20528,
1841 0x20540, 0x20614,
1842 0x21000, 0x21040,
1843 0x2104c, 0x21060,
1844 0x210c0, 0x210ec,
1845 0x21200, 0x21268,
1846 0x21270, 0x21284,
1847 0x212fc, 0x21388,
1848 0x21400, 0x21404,
1849 0x21500, 0x21518,
1850 0x2152c, 0x2153c,
1851 0x21550, 0x21554,
1852 0x21600, 0x21600,
1853 0x21608, 0x21628,
1854 0x21630, 0x2163c,
1855 0x21700, 0x2171c,
1856 0x21780, 0x2178c,
1857 0x21800, 0x21c38,
1858 0x21c80, 0x21d7c,
1859 0x21e00, 0x21e04,
1860 0x22000, 0x2202c,
1861 0x22100, 0x2213c,
1862 0x22190, 0x221c8,
1863 0x22200, 0x22318,
1864 0x22400, 0x22528,
1865 0x22540, 0x22614,
1866 0x23000, 0x23040,
1867 0x2304c, 0x23060,
1868 0x230c0, 0x230ec,
1869 0x23200, 0x23268,
1870 0x23270, 0x23284,
1871 0x232fc, 0x23388,
1872 0x23400, 0x23404,
1873 0x23500, 0x23518,
1874 0x2352c, 0x2353c,
1875 0x23550, 0x23554,
1876 0x23600, 0x23600,
1877 0x23608, 0x23628,
1878 0x23630, 0x2363c,
1879 0x23700, 0x2371c,
1880 0x23780, 0x2378c,
1881 0x23800, 0x23c38,
1882 0x23c80, 0x23d7c,
1883 0x23e00, 0x23e04,
1884 0x24000, 0x2402c,
1885 0x24100, 0x2413c,
1886 0x24190, 0x241c8,
1887 0x24200, 0x24318,
1888 0x24400, 0x24528,
1889 0x24540, 0x24614,
1890 0x25000, 0x25040,
1891 0x2504c, 0x25060,
1892 0x250c0, 0x250ec,
1893 0x25200, 0x25268,
1894 0x25270, 0x25284,
1895 0x252fc, 0x25388,
1896 0x25400, 0x25404,
1897 0x25500, 0x25518,
1898 0x2552c, 0x2553c,
1899 0x25550, 0x25554,
1900 0x25600, 0x25600,
1901 0x25608, 0x25628,
1902 0x25630, 0x2563c,
1903 0x25700, 0x2571c,
1904 0x25780, 0x2578c,
1905 0x25800, 0x25c38,
1906 0x25c80, 0x25d7c,
1907 0x25e00, 0x25e04,
1908 0x26000, 0x2602c,
1909 0x26100, 0x2613c,
1910 0x26190, 0x261c8,
1911 0x26200, 0x26318,
1912 0x26400, 0x26528,
1913 0x26540, 0x26614,
1914 0x27000, 0x27040,
1915 0x2704c, 0x27060,
1916 0x270c0, 0x270ec,
1917 0x27200, 0x27268,
1918 0x27270, 0x27284,
1919 0x272fc, 0x27388,
1920 0x27400, 0x27404,
1921 0x27500, 0x27518,
1922 0x2752c, 0x2753c,
1923 0x27550, 0x27554,
1924 0x27600, 0x27600,
1925 0x27608, 0x27628,
1926 0x27630, 0x2763c,
1927 0x27700, 0x2771c,
1928 0x27780, 0x2778c,
1929 0x27800, 0x27c38,
1930 0x27c80, 0x27d7c,
1931 0x27e00, 0x27e04
1932 };
1933
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001934 static const unsigned int t5_reg_ranges[] = {
1935 0x1008, 0x1148,
1936 0x1180, 0x11b4,
1937 0x11fc, 0x123c,
1938 0x1280, 0x173c,
1939 0x1800, 0x18fc,
1940 0x3000, 0x3028,
1941 0x3060, 0x30d8,
1942 0x30e0, 0x30fc,
1943 0x3140, 0x357c,
1944 0x35a8, 0x35cc,
1945 0x35ec, 0x35ec,
1946 0x3600, 0x5624,
1947 0x56cc, 0x575c,
1948 0x580c, 0x5814,
1949 0x5890, 0x58bc,
1950 0x5940, 0x59dc,
1951 0x59fc, 0x5a18,
1952 0x5a60, 0x5a9c,
1953 0x5b9c, 0x5bfc,
1954 0x6000, 0x6040,
1955 0x6058, 0x614c,
1956 0x7700, 0x7798,
1957 0x77c0, 0x78fc,
1958 0x7b00, 0x7c54,
1959 0x7d00, 0x7efc,
1960 0x8dc0, 0x8de0,
1961 0x8df8, 0x8e84,
1962 0x8ea0, 0x8f84,
1963 0x8fc0, 0x90f8,
1964 0x9400, 0x9470,
1965 0x9600, 0x96f4,
1966 0x9800, 0x9808,
1967 0x9820, 0x983c,
1968 0x9850, 0x9864,
1969 0x9c00, 0x9c6c,
1970 0x9c80, 0x9cec,
1971 0x9d00, 0x9d6c,
1972 0x9d80, 0x9dec,
1973 0x9e00, 0x9e6c,
1974 0x9e80, 0x9eec,
1975 0x9f00, 0x9f6c,
1976 0x9f80, 0xa020,
1977 0xd004, 0xd03c,
1978 0xdfc0, 0xdfe0,
1979 0xe000, 0x11088,
Hariprasad Shenai3d9103f2014-09-01 19:54:59 +05301980 0x1109c, 0x11110,
1981 0x11118, 0x1117c,
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001982 0x11190, 0x11204,
1983 0x19040, 0x1906c,
1984 0x19078, 0x19080,
1985 0x1908c, 0x19124,
1986 0x19150, 0x191b0,
1987 0x191d0, 0x191e8,
1988 0x19238, 0x19290,
1989 0x193f8, 0x19474,
1990 0x19490, 0x194cc,
1991 0x194f0, 0x194f8,
1992 0x19c00, 0x19c60,
1993 0x19c94, 0x19e10,
1994 0x19e50, 0x19f34,
1995 0x19f40, 0x19f50,
1996 0x19f90, 0x19fe4,
1997 0x1a000, 0x1a06c,
1998 0x1a0b0, 0x1a120,
1999 0x1a128, 0x1a138,
2000 0x1a190, 0x1a1c4,
2001 0x1a1fc, 0x1a1fc,
2002 0x1e008, 0x1e00c,
2003 0x1e040, 0x1e04c,
2004 0x1e284, 0x1e290,
2005 0x1e2c0, 0x1e2c0,
2006 0x1e2e0, 0x1e2e0,
2007 0x1e300, 0x1e384,
2008 0x1e3c0, 0x1e3c8,
2009 0x1e408, 0x1e40c,
2010 0x1e440, 0x1e44c,
2011 0x1e684, 0x1e690,
2012 0x1e6c0, 0x1e6c0,
2013 0x1e6e0, 0x1e6e0,
2014 0x1e700, 0x1e784,
2015 0x1e7c0, 0x1e7c8,
2016 0x1e808, 0x1e80c,
2017 0x1e840, 0x1e84c,
2018 0x1ea84, 0x1ea90,
2019 0x1eac0, 0x1eac0,
2020 0x1eae0, 0x1eae0,
2021 0x1eb00, 0x1eb84,
2022 0x1ebc0, 0x1ebc8,
2023 0x1ec08, 0x1ec0c,
2024 0x1ec40, 0x1ec4c,
2025 0x1ee84, 0x1ee90,
2026 0x1eec0, 0x1eec0,
2027 0x1eee0, 0x1eee0,
2028 0x1ef00, 0x1ef84,
2029 0x1efc0, 0x1efc8,
2030 0x1f008, 0x1f00c,
2031 0x1f040, 0x1f04c,
2032 0x1f284, 0x1f290,
2033 0x1f2c0, 0x1f2c0,
2034 0x1f2e0, 0x1f2e0,
2035 0x1f300, 0x1f384,
2036 0x1f3c0, 0x1f3c8,
2037 0x1f408, 0x1f40c,
2038 0x1f440, 0x1f44c,
2039 0x1f684, 0x1f690,
2040 0x1f6c0, 0x1f6c0,
2041 0x1f6e0, 0x1f6e0,
2042 0x1f700, 0x1f784,
2043 0x1f7c0, 0x1f7c8,
2044 0x1f808, 0x1f80c,
2045 0x1f840, 0x1f84c,
2046 0x1fa84, 0x1fa90,
2047 0x1fac0, 0x1fac0,
2048 0x1fae0, 0x1fae0,
2049 0x1fb00, 0x1fb84,
2050 0x1fbc0, 0x1fbc8,
2051 0x1fc08, 0x1fc0c,
2052 0x1fc40, 0x1fc4c,
2053 0x1fe84, 0x1fe90,
2054 0x1fec0, 0x1fec0,
2055 0x1fee0, 0x1fee0,
2056 0x1ff00, 0x1ff84,
2057 0x1ffc0, 0x1ffc8,
2058 0x30000, 0x30030,
2059 0x30100, 0x30144,
2060 0x30190, 0x301d0,
2061 0x30200, 0x30318,
2062 0x30400, 0x3052c,
2063 0x30540, 0x3061c,
2064 0x30800, 0x30834,
2065 0x308c0, 0x30908,
2066 0x30910, 0x309ac,
2067 0x30a00, 0x30a04,
2068 0x30a0c, 0x30a2c,
2069 0x30a44, 0x30a50,
2070 0x30a74, 0x30c24,
2071 0x30d08, 0x30d14,
2072 0x30d1c, 0x30d20,
2073 0x30d3c, 0x30d50,
2074 0x31200, 0x3120c,
2075 0x31220, 0x31220,
2076 0x31240, 0x31240,
2077 0x31600, 0x31600,
2078 0x31608, 0x3160c,
2079 0x31a00, 0x31a1c,
2080 0x31e04, 0x31e20,
2081 0x31e38, 0x31e3c,
2082 0x31e80, 0x31e80,
2083 0x31e88, 0x31ea8,
2084 0x31eb0, 0x31eb4,
2085 0x31ec8, 0x31ed4,
2086 0x31fb8, 0x32004,
2087 0x32208, 0x3223c,
2088 0x32600, 0x32630,
2089 0x32a00, 0x32abc,
2090 0x32b00, 0x32b70,
2091 0x33000, 0x33048,
2092 0x33060, 0x3309c,
2093 0x330f0, 0x33148,
2094 0x33160, 0x3319c,
2095 0x331f0, 0x332e4,
2096 0x332f8, 0x333e4,
2097 0x333f8, 0x33448,
2098 0x33460, 0x3349c,
2099 0x334f0, 0x33548,
2100 0x33560, 0x3359c,
2101 0x335f0, 0x336e4,
2102 0x336f8, 0x337e4,
2103 0x337f8, 0x337fc,
2104 0x33814, 0x33814,
2105 0x3382c, 0x3382c,
2106 0x33880, 0x3388c,
2107 0x338e8, 0x338ec,
2108 0x33900, 0x33948,
2109 0x33960, 0x3399c,
2110 0x339f0, 0x33ae4,
2111 0x33af8, 0x33b10,
2112 0x33b28, 0x33b28,
2113 0x33b3c, 0x33b50,
2114 0x33bf0, 0x33c10,
2115 0x33c28, 0x33c28,
2116 0x33c3c, 0x33c50,
2117 0x33cf0, 0x33cfc,
2118 0x34000, 0x34030,
2119 0x34100, 0x34144,
2120 0x34190, 0x341d0,
2121 0x34200, 0x34318,
2122 0x34400, 0x3452c,
2123 0x34540, 0x3461c,
2124 0x34800, 0x34834,
2125 0x348c0, 0x34908,
2126 0x34910, 0x349ac,
2127 0x34a00, 0x34a04,
2128 0x34a0c, 0x34a2c,
2129 0x34a44, 0x34a50,
2130 0x34a74, 0x34c24,
2131 0x34d08, 0x34d14,
2132 0x34d1c, 0x34d20,
2133 0x34d3c, 0x34d50,
2134 0x35200, 0x3520c,
2135 0x35220, 0x35220,
2136 0x35240, 0x35240,
2137 0x35600, 0x35600,
2138 0x35608, 0x3560c,
2139 0x35a00, 0x35a1c,
2140 0x35e04, 0x35e20,
2141 0x35e38, 0x35e3c,
2142 0x35e80, 0x35e80,
2143 0x35e88, 0x35ea8,
2144 0x35eb0, 0x35eb4,
2145 0x35ec8, 0x35ed4,
2146 0x35fb8, 0x36004,
2147 0x36208, 0x3623c,
2148 0x36600, 0x36630,
2149 0x36a00, 0x36abc,
2150 0x36b00, 0x36b70,
2151 0x37000, 0x37048,
2152 0x37060, 0x3709c,
2153 0x370f0, 0x37148,
2154 0x37160, 0x3719c,
2155 0x371f0, 0x372e4,
2156 0x372f8, 0x373e4,
2157 0x373f8, 0x37448,
2158 0x37460, 0x3749c,
2159 0x374f0, 0x37548,
2160 0x37560, 0x3759c,
2161 0x375f0, 0x376e4,
2162 0x376f8, 0x377e4,
2163 0x377f8, 0x377fc,
2164 0x37814, 0x37814,
2165 0x3782c, 0x3782c,
2166 0x37880, 0x3788c,
2167 0x378e8, 0x378ec,
2168 0x37900, 0x37948,
2169 0x37960, 0x3799c,
2170 0x379f0, 0x37ae4,
2171 0x37af8, 0x37b10,
2172 0x37b28, 0x37b28,
2173 0x37b3c, 0x37b50,
2174 0x37bf0, 0x37c10,
2175 0x37c28, 0x37c28,
2176 0x37c3c, 0x37c50,
2177 0x37cf0, 0x37cfc,
2178 0x38000, 0x38030,
2179 0x38100, 0x38144,
2180 0x38190, 0x381d0,
2181 0x38200, 0x38318,
2182 0x38400, 0x3852c,
2183 0x38540, 0x3861c,
2184 0x38800, 0x38834,
2185 0x388c0, 0x38908,
2186 0x38910, 0x389ac,
2187 0x38a00, 0x38a04,
2188 0x38a0c, 0x38a2c,
2189 0x38a44, 0x38a50,
2190 0x38a74, 0x38c24,
2191 0x38d08, 0x38d14,
2192 0x38d1c, 0x38d20,
2193 0x38d3c, 0x38d50,
2194 0x39200, 0x3920c,
2195 0x39220, 0x39220,
2196 0x39240, 0x39240,
2197 0x39600, 0x39600,
2198 0x39608, 0x3960c,
2199 0x39a00, 0x39a1c,
2200 0x39e04, 0x39e20,
2201 0x39e38, 0x39e3c,
2202 0x39e80, 0x39e80,
2203 0x39e88, 0x39ea8,
2204 0x39eb0, 0x39eb4,
2205 0x39ec8, 0x39ed4,
2206 0x39fb8, 0x3a004,
2207 0x3a208, 0x3a23c,
2208 0x3a600, 0x3a630,
2209 0x3aa00, 0x3aabc,
2210 0x3ab00, 0x3ab70,
2211 0x3b000, 0x3b048,
2212 0x3b060, 0x3b09c,
2213 0x3b0f0, 0x3b148,
2214 0x3b160, 0x3b19c,
2215 0x3b1f0, 0x3b2e4,
2216 0x3b2f8, 0x3b3e4,
2217 0x3b3f8, 0x3b448,
2218 0x3b460, 0x3b49c,
2219 0x3b4f0, 0x3b548,
2220 0x3b560, 0x3b59c,
2221 0x3b5f0, 0x3b6e4,
2222 0x3b6f8, 0x3b7e4,
2223 0x3b7f8, 0x3b7fc,
2224 0x3b814, 0x3b814,
2225 0x3b82c, 0x3b82c,
2226 0x3b880, 0x3b88c,
2227 0x3b8e8, 0x3b8ec,
2228 0x3b900, 0x3b948,
2229 0x3b960, 0x3b99c,
2230 0x3b9f0, 0x3bae4,
2231 0x3baf8, 0x3bb10,
2232 0x3bb28, 0x3bb28,
2233 0x3bb3c, 0x3bb50,
2234 0x3bbf0, 0x3bc10,
2235 0x3bc28, 0x3bc28,
2236 0x3bc3c, 0x3bc50,
2237 0x3bcf0, 0x3bcfc,
2238 0x3c000, 0x3c030,
2239 0x3c100, 0x3c144,
2240 0x3c190, 0x3c1d0,
2241 0x3c200, 0x3c318,
2242 0x3c400, 0x3c52c,
2243 0x3c540, 0x3c61c,
2244 0x3c800, 0x3c834,
2245 0x3c8c0, 0x3c908,
2246 0x3c910, 0x3c9ac,
2247 0x3ca00, 0x3ca04,
2248 0x3ca0c, 0x3ca2c,
2249 0x3ca44, 0x3ca50,
2250 0x3ca74, 0x3cc24,
2251 0x3cd08, 0x3cd14,
2252 0x3cd1c, 0x3cd20,
2253 0x3cd3c, 0x3cd50,
2254 0x3d200, 0x3d20c,
2255 0x3d220, 0x3d220,
2256 0x3d240, 0x3d240,
2257 0x3d600, 0x3d600,
2258 0x3d608, 0x3d60c,
2259 0x3da00, 0x3da1c,
2260 0x3de04, 0x3de20,
2261 0x3de38, 0x3de3c,
2262 0x3de80, 0x3de80,
2263 0x3de88, 0x3dea8,
2264 0x3deb0, 0x3deb4,
2265 0x3dec8, 0x3ded4,
2266 0x3dfb8, 0x3e004,
2267 0x3e208, 0x3e23c,
2268 0x3e600, 0x3e630,
2269 0x3ea00, 0x3eabc,
2270 0x3eb00, 0x3eb70,
2271 0x3f000, 0x3f048,
2272 0x3f060, 0x3f09c,
2273 0x3f0f0, 0x3f148,
2274 0x3f160, 0x3f19c,
2275 0x3f1f0, 0x3f2e4,
2276 0x3f2f8, 0x3f3e4,
2277 0x3f3f8, 0x3f448,
2278 0x3f460, 0x3f49c,
2279 0x3f4f0, 0x3f548,
2280 0x3f560, 0x3f59c,
2281 0x3f5f0, 0x3f6e4,
2282 0x3f6f8, 0x3f7e4,
2283 0x3f7f8, 0x3f7fc,
2284 0x3f814, 0x3f814,
2285 0x3f82c, 0x3f82c,
2286 0x3f880, 0x3f88c,
2287 0x3f8e8, 0x3f8ec,
2288 0x3f900, 0x3f948,
2289 0x3f960, 0x3f99c,
2290 0x3f9f0, 0x3fae4,
2291 0x3faf8, 0x3fb10,
2292 0x3fb28, 0x3fb28,
2293 0x3fb3c, 0x3fb50,
2294 0x3fbf0, 0x3fc10,
2295 0x3fc28, 0x3fc28,
2296 0x3fc3c, 0x3fc50,
2297 0x3fcf0, 0x3fcfc,
2298 0x40000, 0x4000c,
2299 0x40040, 0x40068,
2300 0x40080, 0x40144,
2301 0x40180, 0x4018c,
2302 0x40200, 0x40298,
2303 0x402ac, 0x4033c,
2304 0x403f8, 0x403fc,
Kumar Sanghvic1f49e32014-02-18 17:56:13 +05302305 0x41304, 0x413c4,
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002306 0x41400, 0x4141c,
2307 0x41480, 0x414d0,
2308 0x44000, 0x44078,
2309 0x440c0, 0x44278,
2310 0x442c0, 0x44478,
2311 0x444c0, 0x44678,
2312 0x446c0, 0x44878,
2313 0x448c0, 0x449fc,
2314 0x45000, 0x45068,
2315 0x45080, 0x45084,
2316 0x450a0, 0x450b0,
2317 0x45200, 0x45268,
2318 0x45280, 0x45284,
2319 0x452a0, 0x452b0,
2320 0x460c0, 0x460e4,
2321 0x47000, 0x4708c,
2322 0x47200, 0x47250,
2323 0x47400, 0x47420,
2324 0x47600, 0x47618,
2325 0x47800, 0x47814,
2326 0x48000, 0x4800c,
2327 0x48040, 0x48068,
2328 0x48080, 0x48144,
2329 0x48180, 0x4818c,
2330 0x48200, 0x48298,
2331 0x482ac, 0x4833c,
2332 0x483f8, 0x483fc,
Kumar Sanghvic1f49e32014-02-18 17:56:13 +05302333 0x49304, 0x493c4,
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002334 0x49400, 0x4941c,
2335 0x49480, 0x494d0,
2336 0x4c000, 0x4c078,
2337 0x4c0c0, 0x4c278,
2338 0x4c2c0, 0x4c478,
2339 0x4c4c0, 0x4c678,
2340 0x4c6c0, 0x4c878,
2341 0x4c8c0, 0x4c9fc,
2342 0x4d000, 0x4d068,
2343 0x4d080, 0x4d084,
2344 0x4d0a0, 0x4d0b0,
2345 0x4d200, 0x4d268,
2346 0x4d280, 0x4d284,
2347 0x4d2a0, 0x4d2b0,
2348 0x4e0c0, 0x4e0e4,
2349 0x4f000, 0x4f08c,
2350 0x4f200, 0x4f250,
2351 0x4f400, 0x4f420,
2352 0x4f600, 0x4f618,
2353 0x4f800, 0x4f814,
2354 0x50000, 0x500cc,
2355 0x50400, 0x50400,
2356 0x50800, 0x508cc,
2357 0x50c00, 0x50c00,
2358 0x51000, 0x5101c,
2359 0x51300, 0x51308,
2360 };
2361
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002362 int i;
2363 struct adapter *ap = netdev2adap(dev);
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002364 static const unsigned int *reg_ranges;
2365 int arr_size = 0, buf_size = 0;
2366
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302367 if (is_t4(ap->params.chip)) {
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002368 reg_ranges = &t4_reg_ranges[0];
2369 arr_size = ARRAY_SIZE(t4_reg_ranges);
2370 buf_size = T4_REGMAP_SIZE;
2371 } else {
2372 reg_ranges = &t5_reg_ranges[0];
2373 arr_size = ARRAY_SIZE(t5_reg_ranges);
2374 buf_size = T5_REGMAP_SIZE;
2375 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002376
2377 regs->version = mk_adap_vers(ap);
2378
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002379 memset(buf, 0, buf_size);
2380 for (i = 0; i < arr_size; i += 2)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002381 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2382}
2383
2384static int restart_autoneg(struct net_device *dev)
2385{
2386 struct port_info *p = netdev_priv(dev);
2387
2388 if (!netif_running(dev))
2389 return -EAGAIN;
2390 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2391 return -EINVAL;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002392 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002393 return 0;
2394}
2395
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002396static int identify_port(struct net_device *dev,
2397 enum ethtool_phys_id_state state)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002398{
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002399 unsigned int val;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002400 struct adapter *adap = netdev2adap(dev);
2401
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002402 if (state == ETHTOOL_ID_ACTIVE)
2403 val = 0xffff;
2404 else if (state == ETHTOOL_ID_INACTIVE)
2405 val = 0;
2406 else
2407 return -EINVAL;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002408
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002409 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002410}
2411
2412static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2413{
2414 unsigned int v = 0;
2415
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002416 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2417 type == FW_PORT_TYPE_BT_XAUI) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002418 v |= SUPPORTED_TP;
2419 if (caps & FW_PORT_CAP_SPEED_100M)
2420 v |= SUPPORTED_100baseT_Full;
2421 if (caps & FW_PORT_CAP_SPEED_1G)
2422 v |= SUPPORTED_1000baseT_Full;
2423 if (caps & FW_PORT_CAP_SPEED_10G)
2424 v |= SUPPORTED_10000baseT_Full;
2425 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2426 v |= SUPPORTED_Backplane;
2427 if (caps & FW_PORT_CAP_SPEED_1G)
2428 v |= SUPPORTED_1000baseKX_Full;
2429 if (caps & FW_PORT_CAP_SPEED_10G)
2430 v |= SUPPORTED_10000baseKX4_Full;
2431 } else if (type == FW_PORT_TYPE_KR)
2432 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002433 else if (type == FW_PORT_TYPE_BP_AP)
Dimitris Michailidis7d5e77a2010-12-14 21:36:47 +00002434 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2435 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2436 else if (type == FW_PORT_TYPE_BP4_AP)
2437 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2438 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2439 SUPPORTED_10000baseKX4_Full;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002440 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2441 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002442 v |= SUPPORTED_FIBRE;
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302443 else if (type == FW_PORT_TYPE_BP40_BA)
2444 v |= SUPPORTED_40000baseSR4_Full;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002445
2446 if (caps & FW_PORT_CAP_ANEG)
2447 v |= SUPPORTED_Autoneg;
2448 return v;
2449}
2450
2451static unsigned int to_fw_linkcaps(unsigned int caps)
2452{
2453 unsigned int v = 0;
2454
2455 if (caps & ADVERTISED_100baseT_Full)
2456 v |= FW_PORT_CAP_SPEED_100M;
2457 if (caps & ADVERTISED_1000baseT_Full)
2458 v |= FW_PORT_CAP_SPEED_1G;
2459 if (caps & ADVERTISED_10000baseT_Full)
2460 v |= FW_PORT_CAP_SPEED_10G;
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302461 if (caps & ADVERTISED_40000baseSR4_Full)
2462 v |= FW_PORT_CAP_SPEED_40G;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002463 return v;
2464}
2465
2466static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2467{
2468 const struct port_info *p = netdev_priv(dev);
2469
2470 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002471 p->port_type == FW_PORT_TYPE_BT_XFI ||
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002472 p->port_type == FW_PORT_TYPE_BT_XAUI)
2473 cmd->port = PORT_TP;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002474 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2475 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002476 cmd->port = PORT_FIBRE;
Hariprasad Shenai3e00a502014-05-07 18:01:02 +05302477 else if (p->port_type == FW_PORT_TYPE_SFP ||
2478 p->port_type == FW_PORT_TYPE_QSFP_10G ||
2479 p->port_type == FW_PORT_TYPE_QSFP) {
2480 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2481 p->mod_type == FW_PORT_MOD_TYPE_SR ||
2482 p->mod_type == FW_PORT_MOD_TYPE_ER ||
2483 p->mod_type == FW_PORT_MOD_TYPE_LRM)
2484 cmd->port = PORT_FIBRE;
2485 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2486 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002487 cmd->port = PORT_DA;
2488 else
Hariprasad Shenai3e00a502014-05-07 18:01:02 +05302489 cmd->port = PORT_OTHER;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002490 } else
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002491 cmd->port = PORT_OTHER;
2492
2493 if (p->mdio_addr >= 0) {
2494 cmd->phy_address = p->mdio_addr;
2495 cmd->transceiver = XCVR_EXTERNAL;
2496 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2497 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2498 } else {
2499 cmd->phy_address = 0; /* not really, but no better option */
2500 cmd->transceiver = XCVR_INTERNAL;
2501 cmd->mdio_support = 0;
2502 }
2503
2504 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2505 cmd->advertising = from_fw_linkcaps(p->port_type,
2506 p->link_cfg.advertising);
David Decotigny70739492011-04-27 18:32:40 +00002507 ethtool_cmd_speed_set(cmd,
2508 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002509 cmd->duplex = DUPLEX_FULL;
2510 cmd->autoneg = p->link_cfg.autoneg;
2511 cmd->maxtxpkt = 0;
2512 cmd->maxrxpkt = 0;
2513 return 0;
2514}
2515
2516static unsigned int speed_to_caps(int speed)
2517{
Ben Hutchingse8b39012014-02-23 00:03:24 +00002518 if (speed == 100)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002519 return FW_PORT_CAP_SPEED_100M;
Ben Hutchingse8b39012014-02-23 00:03:24 +00002520 if (speed == 1000)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002521 return FW_PORT_CAP_SPEED_1G;
Ben Hutchingse8b39012014-02-23 00:03:24 +00002522 if (speed == 10000)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002523 return FW_PORT_CAP_SPEED_10G;
Ben Hutchingse8b39012014-02-23 00:03:24 +00002524 if (speed == 40000)
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302525 return FW_PORT_CAP_SPEED_40G;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002526 return 0;
2527}
2528
2529static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2530{
2531 unsigned int cap;
2532 struct port_info *p = netdev_priv(dev);
2533 struct link_config *lc = &p->link_cfg;
David Decotigny25db0332011-04-27 18:32:39 +00002534 u32 speed = ethtool_cmd_speed(cmd);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002535
2536 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2537 return -EINVAL;
2538
2539 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2540 /*
2541 * PHY offers a single speed. See if that's what's
2542 * being requested.
2543 */
2544 if (cmd->autoneg == AUTONEG_DISABLE &&
David Decotigny25db0332011-04-27 18:32:39 +00002545 (lc->supported & speed_to_caps(speed)))
2546 return 0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002547 return -EINVAL;
2548 }
2549
2550 if (cmd->autoneg == AUTONEG_DISABLE) {
David Decotigny25db0332011-04-27 18:32:39 +00002551 cap = speed_to_caps(speed);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002552
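		/* Forcing a speed with autoneg disabled is only honoured
		 * for 100Mb/s; 1G, 10G and 40G links must autonegotiate.
		 */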
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302553 if (!(lc->supported & cap) ||
Ben Hutchingse8b39012014-02-23 00:03:24 +00002554 (speed == 1000) ||
2555 (speed == 10000) ||
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302556 (speed == 40000))
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002557 return -EINVAL;
2558 lc->requested_speed = cap;
2559 lc->advertising = 0;
2560 } else {
2561 cap = to_fw_linkcaps(cmd->advertising);
2562 if (!(lc->supported & cap))
2563 return -EINVAL;
2564 lc->requested_speed = 0;
2565 lc->advertising = cap | FW_PORT_CAP_ANEG;
2566 }
2567 lc->autoneg = cmd->autoneg;
2568
2569 if (netif_running(dev))
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002570 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2571 lc);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002572 return 0;
2573}
2574
2575static void get_pauseparam(struct net_device *dev,
2576 struct ethtool_pauseparam *epause)
2577{
2578 struct port_info *p = netdev_priv(dev);
2579
2580 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2581 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2582 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2583}
2584
2585static int set_pauseparam(struct net_device *dev,
2586 struct ethtool_pauseparam *epause)
2587{
2588 struct port_info *p = netdev_priv(dev);
2589 struct link_config *lc = &p->link_cfg;
2590
2591 if (epause->autoneg == AUTONEG_DISABLE)
2592 lc->requested_fc = 0;
2593 else if (lc->supported & FW_PORT_CAP_ANEG)
2594 lc->requested_fc = PAUSE_AUTONEG;
2595 else
2596 return -EINVAL;
2597
2598 if (epause->rx_pause)
2599 lc->requested_fc |= PAUSE_RX;
2600 if (epause->tx_pause)
2601 lc->requested_fc |= PAUSE_TX;
2602 if (netif_running(dev))
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002603 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2604 lc);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002605 return 0;
2606}
2607
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002608static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2609{
2610 const struct port_info *pi = netdev_priv(dev);
2611 const struct sge *s = &pi->adapter->sge;
2612
2613 e->rx_max_pending = MAX_RX_BUFFERS;
2614 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2615 e->rx_jumbo_max_pending = 0;
2616 e->tx_max_pending = MAX_TXQ_ENTRIES;
2617
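	/* The -8 below (and the matching +8 in set_sge_param()) hides the
	 * free-list entries the driver holds back from the reported size.
	 */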
2618 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2619 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2620 e->rx_jumbo_pending = 0;
2621 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2622}
2623
2624static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2625{
2626 int i;
2627 const struct port_info *pi = netdev_priv(dev);
2628 struct adapter *adapter = pi->adapter;
2629 struct sge *s = &adapter->sge;
2630
2631 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2632 e->tx_pending > MAX_TXQ_ENTRIES ||
2633 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2634 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2635 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2636 return -EINVAL;
2637
2638 if (adapter->flags & FULL_INIT_DONE)
2639 return -EBUSY;
2640
2641 for (i = 0; i < pi->nqsets; ++i) {
2642 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2643 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2644 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2645 }
2646 return 0;
2647}
2648
2649static int closest_timer(const struct sge *s, int time)
2650{
2651 int i, delta, match = 0, min_delta = INT_MAX;
2652
2653 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2654 delta = time - s->timer_val[i];
2655 if (delta < 0)
2656 delta = -delta;
2657 if (delta < min_delta) {
2658 min_delta = delta;
2659 match = i;
2660 }
2661 }
2662 return match;
2663}
2664
2665static int closest_thres(const struct sge *s, int thres)
2666{
2667 int i, delta, match = 0, min_delta = INT_MAX;
2668
2669 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2670 delta = thres - s->counter_val[i];
2671 if (delta < 0)
2672 delta = -delta;
2673 if (delta < min_delta) {
2674 min_delta = delta;
2675 match = i;
2676 }
2677 }
2678 return match;
2679}
2680
2681/*
2682 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2683 */
2684static unsigned int qtimer_val(const struct adapter *adap,
2685 const struct sge_rspq *q)
2686{
2687 unsigned int idx = q->intr_params >> 1;
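	/* Bit 0 of intr_params is the QINTR_CNT_EN flag; the timer index
	 * occupies the bits above it (see set_rspq_intr_params() below).
	 */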
2688
2689 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2690}
2691
2692/**
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302693 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002694 * @q: the Rx queue
2695 * @us: the hold-off time in us, or 0 to disable timer
2696 * @cnt: the hold-off packet count, or 0 to disable counter
2697 *
2698 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2699 * one of the two needs to be enabled for the queue to generate interrupts.
2700 */
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302701static int set_rspq_intr_params(struct sge_rspq *q,
2702 unsigned int us, unsigned int cnt)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002703{
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302704 struct adapter *adap = q->adap;
2705
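	/* If both parameters are zero, fall back to a packet count of 1
	 * so the queue still generates interrupts.
	 */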
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002706 if ((us | cnt) == 0)
2707 cnt = 1;
2708
2709 if (cnt) {
2710 int err;
2711 u32 v, new_idx;
2712
2713 new_idx = closest_thres(&adap->sge, cnt);
2714 if (q->desc && q->pktcnt_idx != new_idx) {
2715 /* the queue has already been created, update it */
2716 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2717 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2718 FW_PARAMS_PARAM_YZ(q->cntxt_id);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002719 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2720 &new_idx);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002721 if (err)
2722 return err;
2723 }
2724 q->pktcnt_idx = new_idx;
2725 }
2726
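	/* A timer index of 6 falls outside the timer_val[] range checked
	 * in qtimer_val() above and therefore means "no timer".
	 */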
2727 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2728 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2729 return 0;
2730}
2731
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302732/**
2733 * set_rx_intr_params - set a net devices's RX interrupt holdoff paramete!
2734 * @dev: the network device
2735 * @us: the hold-off time in us, or 0 to disable timer
2736 * @cnt: the hold-off packet count, or 0 to disable counter
2737 *
2738 * Set the RX interrupt hold-off parameters for a network device.
2739 */
2740static int set_rx_intr_params(struct net_device *dev,
2741 unsigned int us, unsigned int cnt)
2742{
2743 int i, err;
2744 struct port_info *pi = netdev_priv(dev);
2745 struct adapter *adap = pi->adapter;
2746 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2747
2748 for (i = 0; i < pi->nqsets; i++, q++) {
2749 err = set_rspq_intr_params(&q->rspq, us, cnt);
2750 if (err)
2751 return err;
2752 }
2753 return 0;
2754}
2755
Hariprasad Shenaie553ec32014-09-26 00:23:55 +05302756static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
2757{
2758 int i;
2759 struct port_info *pi = netdev_priv(dev);
2760 struct adapter *adap = pi->adapter;
2761 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2762
2763 for (i = 0; i < pi->nqsets; i++, q++)
2764 q->rspq.adaptive_rx = adaptive_rx;
2765
2766 return 0;
2767}
2768
2769static int get_adaptive_rx_setting(struct net_device *dev)
2770{
2771 struct port_info *pi = netdev_priv(dev);
2772 struct adapter *adap = pi->adapter;
2773 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2774
2775 return q->rspq.adaptive_rx;
2776}
2777
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002778static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2779{
Hariprasad Shenaie553ec32014-09-26 00:23:55 +05302780 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05302781 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2782 c->rx_max_coalesced_frames);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002783}
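/* set_coalesce()/get_coalesce() are driven by "ethtool -C" / "ethtool -c",
 * e.g.
 *	ethtool -C ethX adaptive-rx on rx-usecs 50 rx-frames 16
 * arrives here as use_adaptive_rx_coalesce = 1, rx_coalesce_usecs = 50 and
 * rx_max_coalesced_frames = 16 (interface name illustrative).
 */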
2784
2785static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2786{
2787 const struct port_info *pi = netdev_priv(dev);
2788 const struct adapter *adap = pi->adapter;
2789 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2790
2791 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2792 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2793 adap->sge.counter_val[rq->pktcnt_idx] : 0;
Hariprasad Shenaie553ec32014-09-26 00:23:55 +05302794 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002795 return 0;
2796}
2797
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002798/**
2799 * eeprom_ptov - translate a physical EEPROM address to virtual
2800 * @phys_addr: the physical EEPROM address
2801 * @fn: the PCI function number
2802 * @sz: size of function-specific area
2803 *
2804 * Translate a physical EEPROM address to virtual. The first 1K is
2805 *	accessed through virtual addresses starting at 31K; the rest is
2806 * accessed through virtual addresses starting at 0.
2807 *
2808 * The mapping is as follows:
2809 * [0..1K) -> [31K..32K)
2810 * [1K..1K+A) -> [31K-A..31K)
2811 * [1K+A..ES) -> [0..ES-A-1K)
2812 *
2813 * where A = @fn * @sz, and ES = EEPROM size.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002814 */
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002815static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002816{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002817 fn *= sz;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002818 if (phys_addr < 1024)
2819 return phys_addr + (31 << 10);
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002820 if (phys_addr < 1024 + fn)
2821 return 31744 - fn + phys_addr - 1024;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002822 if (phys_addr < EEPROMSIZE)
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002823 return phys_addr - 1024 - fn;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002824 return -EINVAL;
2825}
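/* Worked example, assuming a 1KB per-function area (@sz = 1024) and
 * @fn = 1: phys 0x000 -> 31744, phys 0x400 -> 30720, phys 0x800 -> 0.
 */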
2826
2827/*
2828 * The next two routines implement eeprom read/write from physical addresses.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002829 */
2830static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2831{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002832 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002833
2834 if (vaddr >= 0)
2835 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2836 return vaddr < 0 ? vaddr : 0;
2837}
2838
2839static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2840{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002841 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002842
2843 if (vaddr >= 0)
2844 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2845 return vaddr < 0 ? vaddr : 0;
2846}
2847
2848#define EEPROM_MAGIC 0x38E2F10C
2849
2850static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2851 u8 *data)
2852{
2853 int i, err = 0;
2854 struct adapter *adapter = netdev2adap(dev);
2855
2856 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2857 if (!buf)
2858 return -ENOMEM;
2859
2860 e->magic = EEPROM_MAGIC;
2861 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2862 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2863
2864 if (!err)
2865 memcpy(data, buf + e->offset, e->len);
2866 kfree(buf);
2867 return err;
2868}
2869
2870static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2871 u8 *data)
2872{
2873 u8 *buf;
2874 int err = 0;
2875 u32 aligned_offset, aligned_len, *p;
2876 struct adapter *adapter = netdev2adap(dev);
2877
2878 if (eeprom->magic != EEPROM_MAGIC)
2879 return -EINVAL;
2880
2881 aligned_offset = eeprom->offset & ~3;
2882 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2883
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002884 if (adapter->fn > 0) {
2885 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2886
2887 if (aligned_offset < start ||
2888 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2889 return -EPERM;
2890 }
2891
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002892 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2893 /*
2894 * RMW possibly needed for first or last words.
2895 */
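		/* For example, a 3-byte write at offset 2 yields
		 * aligned_offset = 0 and aligned_len = 8, so both boundary
		 * words are read back and merged before writing.
		 */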
2896 buf = kmalloc(aligned_len, GFP_KERNEL);
2897 if (!buf)
2898 return -ENOMEM;
2899 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2900 if (!err && aligned_len > 4)
2901 err = eeprom_rd_phys(adapter,
2902 aligned_offset + aligned_len - 4,
2903 (u32 *)&buf[aligned_len - 4]);
2904 if (err)
2905 goto out;
2906 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2907 } else
2908 buf = data;
2909
2910 err = t4_seeprom_wp(adapter, false);
2911 if (err)
2912 goto out;
2913
2914 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2915 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2916 aligned_offset += 4;
2917 }
2918
2919 if (!err)
2920 err = t4_seeprom_wp(adapter, true);
2921out:
2922 if (buf != data)
2923 kfree(buf);
2924 return err;
2925}
2926
2927static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2928{
2929 int ret;
2930 const struct firmware *fw;
2931 struct adapter *adap = netdev2adap(netdev);
Hariprasad Shenai22c0b962014-10-15 01:54:14 +05302932 unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002933
2934 ef->data[sizeof(ef->data) - 1] = '\0';
2935 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2936 if (ret < 0)
2937 return ret;
2938
Hariprasad Shenai22c0b962014-10-15 01:54:14 +05302939 /* If the adapter has been fully initialized then we'll go ahead and
2940 * try to get the firmware's cooperation in upgrading to the new
2941	 * firmware image; otherwise we'll try to do the entire job from the
2942 * host ... and we always "force" the operation in this path.
2943 */
2944 if (adap->flags & FULL_INIT_DONE)
2945 mbox = adap->mbox;
2946
2947 ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002948 release_firmware(fw);
2949 if (!ret)
Hariprasad Shenai22c0b962014-10-15 01:54:14 +05302950 dev_info(adap->pdev_dev, "loaded firmware %s,"
2951 " reload cxgb4 driver\n", ef->data);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002952 return ret;
2953}
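/* Reached from userspace via "ethtool -f", e.g.
 *	ethtool -f ethX t4fw.bin
 * (image name illustrative; it is resolved through request_firmware()).
 */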
2954
2955#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2956#define BCAST_CRC 0xa0ccc1a6
2957
2958static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2959{
2960 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2961 wol->wolopts = netdev2adap(dev)->wol;
2962 memset(&wol->sopass, 0, sizeof(wol->sopass));
2963}
2964
2965static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2966{
2967 int err = 0;
2968 struct port_info *pi = netdev_priv(dev);
2969
2970 if (wol->wolopts & ~WOL_SUPPORTED)
2971 return -EINVAL;
2972 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2973 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2974 if (wol->wolopts & WAKE_BCAST) {
2975 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2976 ~0ULL, 0, false);
2977 if (!err)
2978 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2979 ~6ULL, ~0ULL, BCAST_CRC, true);
2980 } else
2981 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2982 return err;
2983}
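/* Corresponds to "ethtool -s ethX wol gb": 'g' requests magic-packet
 * wake (WAKE_MAGIC) and 'b' broadcast wake (WAKE_BCAST).
 */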
2984
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002985static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002986{
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00002987 const struct port_info *pi = netdev_priv(dev);
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002988 netdev_features_t changed = dev->features ^ features;
Dimitris Michailidis19ecae22010-10-21 11:29:56 +00002989 int err;
Dimitris Michailidis19ecae22010-10-21 11:29:56 +00002990
Patrick McHardyf6469682013-04-19 02:04:27 +00002991 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00002992 return 0;
Dimitris Michailidis19ecae22010-10-21 11:29:56 +00002993
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00002994 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2995 -1, -1, -1,
Patrick McHardyf6469682013-04-19 02:04:27 +00002996 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00002997 if (unlikely(err))
Patrick McHardyf6469682013-04-19 02:04:27 +00002998 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
Dimitris Michailidis19ecae22010-10-21 11:29:56 +00002999 return err;
Dimitris Michailidis87b6cf52010-04-27 16:22:42 -07003000}
3001
Ben Hutchings7850f632011-12-15 13:55:01 +00003002static u32 get_rss_table_size(struct net_device *dev)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00003003{
3004 const struct port_info *pi = netdev_priv(dev);
Dimitris Michailidis671b0062010-07-11 12:01:17 +00003005
Ben Hutchings7850f632011-12-15 13:55:01 +00003006 return pi->rss_size;
3007}
3008
Ben Hutchingsfe62d002014-05-15 01:25:27 +01003009static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
Ben Hutchings7850f632011-12-15 13:55:01 +00003010{
3011 const struct port_info *pi = netdev_priv(dev);
3012 unsigned int n = pi->rss_size;
3013
Dimitris Michailidis671b0062010-07-11 12:01:17 +00003014 while (n--)
Ben Hutchings7850f632011-12-15 13:55:01 +00003015 p[n] = pi->rss[n];
Dimitris Michailidis671b0062010-07-11 12:01:17 +00003016 return 0;
3017}
3018
Ben Hutchingsfe62d002014-05-15 01:25:27 +01003019static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00003020{
3021 unsigned int i;
3022 struct port_info *pi = netdev_priv(dev);
3023
Ben Hutchings7850f632011-12-15 13:55:01 +00003024 for (i = 0; i < pi->rss_size; i++)
3025 pi->rss[i] = p[i];
Dimitris Michailidis671b0062010-07-11 12:01:17 +00003026 if (pi->adapter->flags & FULL_INIT_DONE)
3027 return write_rss(pi, pi->rss);
3028 return 0;
3029}
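/* The RSS indirection table above is exposed through "ethtool -x ethX"
 * (read) and "ethtool -X ethX ..." (write).
 */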
3030
3031static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
Ben Hutchings815c7db2011-09-06 13:49:12 +00003032 u32 *rules)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00003033{
Dimitris Michailidisf7965642010-07-11 12:01:18 +00003034 const struct port_info *pi = netdev_priv(dev);
3035
Dimitris Michailidis671b0062010-07-11 12:01:17 +00003036 switch (info->cmd) {
Dimitris Michailidisf7965642010-07-11 12:01:18 +00003037 case ETHTOOL_GRXFH: {
3038 unsigned int v = pi->rss_mode;
3039
3040 info->data = 0;
3041 switch (info->flow_type) {
3042 case TCP_V4_FLOW:
3043 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3044 info->data = RXH_IP_SRC | RXH_IP_DST |
3045 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3046 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3047 info->data = RXH_IP_SRC | RXH_IP_DST;
3048 break;
3049 case UDP_V4_FLOW:
3050 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
3051 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3052 info->data = RXH_IP_SRC | RXH_IP_DST |
3053 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3054 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3055 info->data = RXH_IP_SRC | RXH_IP_DST;
3056 break;
3057 case SCTP_V4_FLOW:
3058 case AH_ESP_V4_FLOW:
3059 case IPV4_FLOW:
3060 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3061 info->data = RXH_IP_SRC | RXH_IP_DST;
3062 break;
3063 case TCP_V6_FLOW:
3064 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3065 info->data = RXH_IP_SRC | RXH_IP_DST |
3066 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3067 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3068 info->data = RXH_IP_SRC | RXH_IP_DST;
3069 break;
3070 case UDP_V6_FLOW:
3071 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
3072 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3073 info->data = RXH_IP_SRC | RXH_IP_DST |
3074 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3075 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3076 info->data = RXH_IP_SRC | RXH_IP_DST;
3077 break;
3078 case SCTP_V6_FLOW:
3079 case AH_ESP_V6_FLOW:
3080 case IPV6_FLOW:
3081 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3082 info->data = RXH_IP_SRC | RXH_IP_DST;
3083 break;
3084 }
3085 return 0;
3086 }
Dimitris Michailidis671b0062010-07-11 12:01:17 +00003087 case ETHTOOL_GRXRINGS:
Dimitris Michailidisf7965642010-07-11 12:01:18 +00003088 info->data = pi->nqsets;
Dimitris Michailidis671b0062010-07-11 12:01:17 +00003089 return 0;
3090 }
3091 return -EOPNOTSUPP;
3092}
3093
stephen hemminger9b07be42012-01-04 12:59:49 +00003094static const struct ethtool_ops cxgb_ethtool_ops = {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003095 .get_settings = get_settings,
3096 .set_settings = set_settings,
3097 .get_drvinfo = get_drvinfo,
3098 .get_msglevel = get_msglevel,
3099 .set_msglevel = set_msglevel,
3100 .get_ringparam = get_sge_param,
3101 .set_ringparam = set_sge_param,
3102 .get_coalesce = get_coalesce,
3103 .set_coalesce = set_coalesce,
3104 .get_eeprom_len = get_eeprom_len,
3105 .get_eeprom = get_eeprom,
3106 .set_eeprom = set_eeprom,
3107 .get_pauseparam = get_pauseparam,
3108 .set_pauseparam = set_pauseparam,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003109 .get_link = ethtool_op_get_link,
3110 .get_strings = get_strings,
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07003111 .set_phys_id = identify_port,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003112 .nway_reset = restart_autoneg,
3113 .get_sset_count = get_sset_count,
3114 .get_ethtool_stats = get_stats,
3115 .get_regs_len = get_regs_len,
3116 .get_regs = get_regs,
3117 .get_wol = get_wol,
3118 .set_wol = set_wol,
Dimitris Michailidis671b0062010-07-11 12:01:17 +00003119 .get_rxnfc = get_rxnfc,
Ben Hutchings7850f632011-12-15 13:55:01 +00003120 .get_rxfh_indir_size = get_rss_table_size,
Ben Hutchingsfe62d002014-05-15 01:25:27 +01003121 .get_rxfh = get_rss_table,
3122 .set_rxfh = set_rss_table,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003123 .flash_device = set_flash,
3124};
3125
3126/*
3127 * debugfs support
3128 */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003129static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
3130 loff_t *ppos)
3131{
3132 loff_t pos = *ppos;
Al Viro496ad9a2013-01-23 17:07:38 -05003133 loff_t avail = file_inode(file)->i_size;
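	/* add_debugfs_mem() below encodes the memory-type index (0..3) in
	 * the low bits of private_data; masking it off recovers the
	 * adapter pointer.
	 */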
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003134 unsigned int mem = (uintptr_t)file->private_data & 3;
3135 struct adapter *adap = file->private_data - mem;
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05303136 __be32 *data;
3137 int ret;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003138
3139 if (pos < 0)
3140 return -EINVAL;
3141 if (pos >= avail)
3142 return 0;
3143 if (count > avail - pos)
3144 count = avail - pos;
3145
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05303146 data = t4_alloc_mem(count);
3147 if (!data)
3148 return -ENOMEM;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003149
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05303150 spin_lock(&adap->win0_lock);
3151 ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
3152 spin_unlock(&adap->win0_lock);
3153 if (ret) {
3154 t4_free_mem(data);
3155 return ret;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003156 }
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05303157 ret = copy_to_user(buf, data, count);
3158
3159 t4_free_mem(data);
3160 if (ret)
3161 return -EFAULT;
3162
3163 *ppos = pos + count;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003164 return count;
3165}
3166
3167static const struct file_operations mem_debugfs_fops = {
3168 .owner = THIS_MODULE,
Stephen Boyd234e3402012-04-05 14:25:11 -07003169 .open = simple_open,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003170 .read = mem_read,
Arnd Bergmann6038f372010-08-15 18:52:59 +02003171 .llseek = default_llseek,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003172};
3173
Bill Pemberton91744942012-12-03 09:23:02 -05003174static void add_debugfs_mem(struct adapter *adap, const char *name,
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00003175 unsigned int idx, unsigned int size_mb)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003176{
3177 struct dentry *de;
3178
3179 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
3180 (void *)adap + idx, &mem_debugfs_fops);
3181 if (de && de->d_inode)
3182 de->d_inode->i_size = size_mb << 20;
3183}
3184
Bill Pemberton91744942012-12-03 09:23:02 -05003185static int setup_debugfs(struct adapter *adap)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003186{
3187 int i;
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00003188 u32 size;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003189
3190 if (IS_ERR_OR_NULL(adap->debugfs_root))
3191 return -1;
3192
3193 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00003194 if (i & EDRAM0_ENABLE) {
3195 size = t4_read_reg(adap, MA_EDRAM0_BAR);
3196 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
3197 }
3198 if (i & EDRAM1_ENABLE) {
3199 size = t4_read_reg(adap, MA_EDRAM1_BAR);
3200 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
3201 }
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303202 if (is_t4(adap->params.chip)) {
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00003203 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3204 if (i & EXT_MEM_ENABLE)
3205 add_debugfs_mem(adap, "mc", MEM_MC,
3206 EXT_MEM_SIZE_GET(size));
3207 } else {
3208 if (i & EXT_MEM_ENABLE) {
3209 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3210 add_debugfs_mem(adap, "mc0", MEM_MC0,
3211 EXT_MEM_SIZE_GET(size));
3212 }
3213 if (i & EXT_MEM1_ENABLE) {
3214 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
3215 add_debugfs_mem(adap, "mc1", MEM_MC1,
3216 EXT_MEM_SIZE_GET(size));
3217 }
3218 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003219 if (adap->l2t)
3220 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
3221 &t4_l2t_fops);
3222 return 0;
3223}
3224
3225/*
3226 * upper-layer driver support
3227 */
3228
3229/*
3230 * Allocate an active-open TID and set it to the supplied value.
3231 */
3232int cxgb4_alloc_atid(struct tid_info *t, void *data)
3233{
3234 int atid = -1;
3235
3236 spin_lock_bh(&t->atid_lock);
3237 if (t->afree) {
3238 union aopen_entry *p = t->afree;
3239
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003240 atid = (p - t->atid_tab) + t->atid_base;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003241 t->afree = p->next;
3242 p->data = data;
3243 t->atids_in_use++;
3244 }
3245 spin_unlock_bh(&t->atid_lock);
3246 return atid;
3247}
3248EXPORT_SYMBOL(cxgb4_alloc_atid);
3249
3250/*
3251 * Release an active-open TID.
3252 */
3253void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3254{
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003255 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003256
3257 spin_lock_bh(&t->atid_lock);
3258 p->next = t->afree;
3259 t->afree = p;
3260 t->atids_in_use--;
3261 spin_unlock_bh(&t->atid_lock);
3262}
3263EXPORT_SYMBOL(cxgb4_free_atid);
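
/* A minimal usage sketch (illustrative only, not part of the driver): a ULD
 * would typically allocate an atid at active-open time, stashing its own
 * connection state as the lookup cookie, and free it once the connection is
 * established or aborted.  The my_conn type below is hypothetical.
 */
#if 0	/* example only */
static int example_active_open(struct tid_info *tids, struct my_conn *conn)
{
	int atid = cxgb4_alloc_atid(tids, conn);	/* conn is the cookie */

	if (atid < 0)
		return -ENOMEM;		/* atid table exhausted */
	conn->atid = atid;
	/* ... build and send CPL_ACT_OPEN_REQ keyed by atid ... */
	return 0;
}
#endif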

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* An IPv6 server needs a max of 520 bits, or 16 cells, in the
		 * TCAM.  This is equivalent to 4 TIDs; with CLIP enabled it
		 * needs only 2 TIDs.
		 */
		if (family == PF_INET)
			t->stids_in_use++;
		else
			t->stids_in_use += 4;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 2);
	t->stid_tab[stid].data = NULL;
	if (family == PF_INET)
		t->stids_in_use--;
	else
		t->stids_in_use -= 4;
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	void *old;
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	old = t->tid_tab[tid];
	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		t->tid_tab[tid] = NULL;
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
	if (old)
		atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
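/* tid_init() below carves one contiguous t4_alloc_mem() allocation into the
 * individual tables, in this order (sizes in entries):
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap[BITS_TO_LONGS(nstids + nsftids)] |
 *	ftid_tab[nftids + nsftids]
 */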
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int stid_bmap_size;
	unsigned int natids = t->natids;
	struct adapter *adap = container_of(t, struct adapter, tids);

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nftids * sizeof(*t->ftid_tab) +
	       t->nsftids * sizeof(*t->ftid_tab);

	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
	/* Reserve stid 0 for T4/T5 adapters */
	if (!t->stid_base &&
	    (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
		__set_bit(0, t->stid_bmap);

	return 0;
}

int cxgb4_clip_get(const struct net_device *dev,
		   const struct in6_addr *lip)
{
	struct adapter *adap;
	struct fw_clip_cmd c;

	adap = netdev2adap(dev);
	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
			      FW_CMD_REQUEST | FW_CMD_WRITE);
	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
	c.ip_hi = *(__be64 *)(lip->s6_addr);
	c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}
EXPORT_SYMBOL(cxgb4_clip_get);

int cxgb4_clip_release(const struct net_device *dev,
		       const struct in6_addr *lip)
{
	struct adapter *adap;
	struct fw_clip_cmd c;

	adap = netdev2adap(dev);
	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
			      FW_CMD_REQUEST | FW_CMD_READ);
	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
	c.ip_hi = *(__be64 *)(lip->s6_addr);
	c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}
EXPORT_SYMBOL(cxgb4_clip_release);

/**
 * cxgb4_create_server - create an IP server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IP address to bind server to
 * @sport: the server's TCP port
 * @vlan: VLAN header information (currently unused by this routine)
 * @queue: queue to direct messages from this server to
 *
 * Create an IP server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
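
/* Illustrative only: binding an IPv4 wildcard listener on TCP port 80 to
 * RX queue 0, with an stid previously obtained from cxgb4_alloc_stid().
 * The function and variable names below are hypothetical.
 */
#if 0	/* example only */
static int example_listen(struct net_device *netdev, unsigned int stid)
{
	return cxgb4_create_server(netdev, stid, htonl(INADDR_ANY),
				   htons(80), 0, 0);
}
#endif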

/* cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
				LISTSVR_IPV6(0)) | QUEUENO(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);

/**
 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 * @mtus: the HW MTU table
 * @mtu: the target MTU
 * @idx: index of selected entry in the MTU table
 *
 * Returns the index and the value in the HW MTU table that is closest to
 * but does not exceed @mtu, unless @mtu is smaller than any value in the
 * table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
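
/* Worked example (hypothetical table contents): given mtus[] beginning
 * { 576, 1280, 1488, 1500, 9000, ... }, cxgb4_best_mtu(mtus, 1400, &idx)
 * returns 1280 with idx = 1 (the largest entry that does not exceed 1400),
 * while a target of 500 returns 576, the smallest entry available.
 */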

/**
 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 * @mtus: the HW MTU table
 * @header_size: Header Size
 * @data_size_max: maximum Data Segment Size
 * @data_size_align: desired Data Segment Size Alignment (2^N)
 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
 * MTU Table based solely on a Maximum MTU parameter, we break that
 * parameter up into a Header Size and Maximum Data Segment Size, and
 * provide a desired Data Segment Size Alignment.  If we find an MTU in
 * the Hardware MTU Table which will result in a Data Segment Size with
 * the requested alignment _and_ that MTU isn't "too far" from the
 * closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
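
/* Illustrative only: a ULD sizing TCP payloads might ask for the best MTU
 * assuming 40 bytes of TCP/IPv4 headers and 8-byte payload alignment.  The
 * function name and parameter values below are hypothetical.
 */
#if 0	/* example only */
static unsigned int example_pick_mtu(const struct adapter *adap)
{
	unsigned int mtu_idx;

	return cxgb4_best_aligned_mtu(adap->params.mtus,
				      40,		/* TCP/IPv4 headers */
				      9000 - 40,	/* max payload */
				      8,		/* 2^3 alignment */
				      &mtu_idx);
}
#endif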

/**
 * cxgb4_port_chan - get the HW channel of a port
 * @dev: the net device for the port
 *
 * Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
	if (is_t4(adap->params.chip)) {
		lp_count = G_LP_COUNT(v1);
		hp_count = G_HP_COUNT(v1);
	} else {
		lp_count = G_LP_COUNT_T5(v1);
		hp_count = G_HP_COUNT_T5(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 * cxgb4_port_viid - get the VI id of a port
 * @dev: the net device for the port
 *
 * Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 * cxgb4_port_idx - get the index of a port
 * @dev: the net device for the port
 *
 * Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
		     HPZ3(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);
	int ret;

	ret = t4_fwaddrspace_write(adap, adap->mbox,
				   0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
	return ret;
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}

int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
			     QID(qid) | PIDX(delta));
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);

void cxgb4_disable_db_coalescing(struct net_device *dev)
{
	struct adapter *adap;

	adap = netdev2adap(dev);
	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
			 F_NOCOALESCE);
}
EXPORT_SYMBOL(cxgb4_disable_db_coalescing);

void cxgb4_enable_db_coalescing(struct net_device *dev)
{
	struct adapter *adap;

	adap = netdev2adap(dev);
	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
}
EXPORT_SYMBOL(cxgb4_enable_db_coalescing);

int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	struct adapter *adap;
	u32 offset, memtype, memaddr;
	u32 edc0_size, edc1_size, mc0_size, mc1_size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as:  EDC0, EDC1, MC0, MC1.  All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20;
	edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20;
	mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20;

	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t4(adap->params.chip)) {
			/* T4 only has a single memory channel */
			goto err;
		} else {
			mc1_size = EXT_MEM_SIZE_GET(
					t4_read_reg(adap,
						    MA_EXT_MEMORY1_BAR)) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
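
/* Worked example (hypothetical sizes): with 256MB in each of EDC0 and EDC1
 * and 2GB in MC0, the flat offset space above is EDC0 = [0, 256MB), EDC1 =
 * [256MB, 512MB) and MC0 = [512MB, 2.5GB).  An offset of 300MB therefore
 * resolves to (MEM_EDC1, 300MB - 256MB = 44MB).
 */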

u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 hi, lo;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
	hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
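
/* Illustrative only: the value above is a raw 64-bit tick count.  Assuming
 * it advances at the adapter's core-clock rate, a caller holding the
 * core-clock period in picoseconds (cclk_ps, as handed to ULDs in struct
 * cxgb4_lld_info below) could convert it to nanoseconds like this.
 */
#if 0	/* example only */
static u64 example_sge_time_ns(struct net_device *netdev,
			       const struct cxgb4_lld_info *lldi)
{
	return cxgb4_read_sge_timestamp(netdev) * lldi->cclk_ps / 1000;
}
#endif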

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
		if (is_t4(adap->params.chip)) {
			lp_count = G_LP_COUNT(v1);
			hp_count = G_HP_COUNT(v1);
		} else {
			lp_count = G_LP_COUNT_T5(v1);
			hp_count = G_HP_COUNT_T5(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
			     QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	if (adap->uld_handle[CXGB4_ULD_RDMA])
		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
					     cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	t4_set_reg_field(adap, SGE_INT_ENABLE3,
			 DBFIFO_HP_INT | DBFIFO_LP_INT,
			 DBFIFO_HP_INT | DBFIFO_LP_INT);
}

static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
			     QID(q->cntxt_id) | PIDX(delta));
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
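
/* Doorbell-drop recovery: on T4, process_db_drop() below drains the doorbell
 * FIFO, lets the RDMA ULD quiesce, re-syncs every egress queue's host
 * producer index with the hardware's copy via recover_all_queues(), and then
 * re-enables doorbells.  On T5 and later the hardware reports exactly which
 * queue dropped a doorbell, so only that queue is re-rung through its BAR2
 * user doorbell.
 */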
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		unsigned int s_qpp;
		unsigned short udb_density;
		unsigned long qpshift;
		int page;
		u32 udb;

		dev_warn(adap->pdev_dev,
			 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
			 dropped_db, qid,
			 (dropped_db >> 14) & 1,
			 (dropped_db >> 13) & 1,
			 pidx_inc);

		drain_db_fifo(adap, 1);

		s_qpp = QUEUESPERPAGEPF1 * adap->fn;
		udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
				SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		qpshift = PAGE_SHIFT - ilog2(udb_density);
		udb = qid << qpshift;
		udb &= PAGE_MASK;
		page = udb / PAGE_SIZE;
		udb += (qid - (page * udb_density)) * 128;

		writel(PIDX(pidx_inc), adap->bar2 + udb + 8);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
}

void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3,
				 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}

static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;
	unsigned short i;

	lli.pdev = adap->pdev;
	lli.pf = adap->fn;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.ciq_ids = adap->sge.rdma_ciq;
		lli.nrxq = adap->sge.rdmaqs;
		lli.nciq = adap->sge.rdmaciqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.ofld_rxq;
		lli.nrxq = adap->sge.ofldqsets;
	}
	lli.ntxq = adap->sge.ofldqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.chip;
	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lli.tx_modq[i] = i;
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
	lli.fw_vers = adap->params.fw_vers;
	lli.dbfifo_int_thresh = dbfifo_int_thresh;
	lli.sge_ingpadboundary = adap->sge.fl_align;
	lli.sge_egrstatuspagesize = adap->sge.stat_len;
	lli.sge_pktshift = adap->sge.pktshift;
	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lli.max_ordird_qp = adap->params.max_ordird_qp;
	lli.max_ird_adapter = adap->params.max_ird_adapter;
	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}

static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	spin_lock(&adap_rcu_lock);
	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
	spin_unlock(&adap_rcu_lock);

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}

static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);

	spin_lock(&adap_rcu_lock);
	list_del_rcu(&adap->rcu_node);
	spin_unlock(&adap_rcu_lock);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}

/**
 * cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.  Returns
 * %-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
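
/* Illustrative only: the skeleton of how an upper-layer module might
 * register itself.  The callbacks are hypothetical stubs; a real ULD fills
 * in the remaining struct cxgb4_uld_info methods it needs and keeps the
 * per-adapter handle returned by its add() callback.
 */
#if 0	/* example only */
static struct cxgb4_uld_info example_uld_info = {
	.name = "example",
	.add = example_add,			/* per-adapter attach */
	.rx_handler = example_rx_handler,	/* offload CPL messages */
	.state_change = example_state_change,	/* up/down/detach events */
};

static int __init example_init(void)
{
	return cxgb4_register_uld(CXGB4_ULD_ISCSI, &example_uld_info);
}
#endif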

/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);

/* Check whether the netdev on which the event occurred belongs to us.
 * Returns true if it does, false otherwise.
 * Called with rcu_read_lock() held.
 */
#if IS_ENABLED(CONFIG_IPV6)
static bool cxgb4_netdev(const struct net_device *netdev)
{
	struct adapter *adap;
	int i;

	list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
		for (i = 0; i < MAX_NPORTS; i++)
			if (adap->port[i] == netdev)
				return true;
	return false;
}

static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
		    unsigned long event)
{
	int ret = NOTIFY_DONE;

	rcu_read_lock();
	if (cxgb4_netdev(event_dev)) {
		switch (event) {
		case NETDEV_UP:
			ret = cxgb4_clip_get(event_dev,
				(const struct in6_addr *)ifa->addr.s6_addr);
			if (ret < 0) {
				rcu_read_unlock();
				return ret;
			}
			ret = NOTIFY_OK;
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev,
				(const struct in6_addr *)ifa->addr.s6_addr);
			ret = NOTIFY_OK;
			break;
		default:
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev;
	int ret = NOTIFY_DONE;
	struct bonding *bond = netdev_priv(ifa->idev->dev);
	struct list_head *iter;
	struct slave *slave;
	struct pci_dev *first_pdev = NULL;

	if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
		event_dev = vlan_dev_real_dev(ifa->idev->dev);
		ret = clip_add(event_dev, ifa, event);
	} else if (ifa->idev->dev->flags & IFF_MASTER) {
		/* It is possible that two different adapters are bonded in
		 * one bond.  We need to find all such adapters and add the
		 * CLIP entry to each of them, but only once per adapter.
		 */
		bond_for_each_slave(bond, slave, iter) {
			if (!first_pdev) {
				ret = clip_add(slave->dev, ifa, event);
				/* Only initialize first_pdev if clip_add()
				 * succeeded, since that means the device
				 * is ours.
				 */
				if (ret == NOTIFY_OK)
					first_pdev = to_pci_dev(
							slave->dev->dev.parent);
			} else if (first_pdev !=
				   to_pci_dev(slave->dev->dev.parent))
					ret = clip_add(slave->dev, ifa, event);
		}
	} else
		ret = clip_add(ifa->idev->dev, ifa, event);

	return ret;
}

static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
 * a physical device.
 * The physical device reference is needed to send the actual CLIP command.
 */
static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
{
	struct inet6_dev *idev = NULL;
	struct inet6_ifaddr *ifa;
	int ret = 0;

	idev = __in6_dev_get(root_dev);
	if (!idev)
		return ret;

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		ret = cxgb4_clip_get(dev,
				(const struct in6_addr *)ifa->addr.s6_addr);
		if (ret < 0)
			break;
	}
	read_unlock_bh(&idev->lock);

	return ret;
}

static int update_root_dev_clip(struct net_device *dev)
{
	struct net_device *root_dev = NULL;
	int i, ret = 0;

	/* First populate the real net device's IPv6 addresses */
	ret = update_dev_clip(dev, dev);
	if (ret)
		return ret;

	/* Parse all bond and vlan devices layered on top of the physical dev */
	root_dev = netdev_master_upper_dev_get_rcu(dev);
	if (root_dev) {
		ret = update_dev_clip(root_dev, dev);
		if (ret)
			return ret;
	}

	for (i = 0; i < VLAN_N_VID; i++) {
		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
		if (!root_dev)
			continue;

		ret = update_dev_clip(root_dev, dev);
		if (ret)
			break;
	}
	return ret;
}

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004535/**
4536 * cxgb_up - enable the adapter
4537 * @adap: adapter being enabled
4538 *
4539 * Called when the first port is enabled, this function performs the
4540 * actions necessary to make an adapter operational, such as completing
4541 * the initialization of HW modules, and enabling interrupts.
4542 *
4543 * Must be called with the rtnl lock held.
4544 */
4545static int cxgb_up(struct adapter *adap)
4546{
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00004547 int err;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004548
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00004549 err = setup_sge_queues(adap);
4550 if (err)
4551 goto out;
4552 err = setup_rss(adap);
4553 if (err)
4554 goto freeq;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004555
4556 if (adap->flags & USING_MSIX) {
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00004557 name_msix_vecs(adap);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004558 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4559 adap->msix_info[0].desc, adap);
4560 if (err)
4561 goto irq_err;
4562
4563 err = request_msix_queue_irqs(adap);
4564 if (err) {
4565 free_irq(adap->msix_info[0].vec, adap);
4566 goto irq_err;
4567 }
4568 } else {
4569 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4570 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +00004571 adap->port[0]->name, adap);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004572 if (err)
4573 goto irq_err;
4574 }
4575 enable_rx(adap);
4576 t4_sge_start(adap);
4577 t4_intr_enable(adap);
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00004578 adap->flags |= FULL_INIT_DONE;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004579 notify_ulds(adap, CXGB4_STATE_UP);
Anish Bhatt1bb60372014-10-14 20:07:22 -07004580#if IS_ENABLED(CONFIG_IPV6)
Vipul Pandya01bcca62013-07-04 16:10:46 +05304581 update_clip(adap);
Anish Bhatt1bb60372014-10-14 20:07:22 -07004582#endif
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004583 out:
4584 return err;
4585 irq_err:
4586 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00004587 freeq:
4588 t4_free_sge_resources(adap);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004589 goto out;
4590}

static void cxgb_down(struct adapter *adapter)
{
	t4_intr_disable(adapter);
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	if (adapter->flags & USING_MSIX) {
		free_msix_queue_irqs(adapter);
		free_irq(adapter->msix_info[0].vec, adapter);
	} else
		free_irq(adapter->pdev->irq, adapter);
	quiesce_rx(adapter);
	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}

/* Return an error number if the indicated filter isn't writable ...
 */
static int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}

/* Delete the filter at the specified index (if valid).  This checks for all
 * the common problems with doing this, such as the filter being locked or
 * currently pending in another operation.
 */
static int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
		return -EINVAL;

	f = &adapter->tids.ftid_tab[fidx];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}

Vipul Pandyadca4fae2012-12-10 09:30:53 +00004679int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
Vipul Pandya793dad92012-12-10 09:30:56 +00004680 __be32 sip, __be16 sport, __be16 vlan,
4681 unsigned int queue, unsigned char port, unsigned char mask)
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004682{
4683 int ret;
4684 struct filter_entry *f;
4685 struct adapter *adap;
4686 int i;
4687 u8 *val;
4688
4689 adap = netdev2adap(dev);
4690
Vipul Pandya1cab7752012-12-10 09:30:55 +00004691 /* Adjust stid to correct filter index */
Kumar Sanghvi470c60c2013-12-18 16:38:21 +05304692 stid -= adap->tids.sftid_base;
Vipul Pandya1cab7752012-12-10 09:30:55 +00004693 stid += adap->tids.nftids;
4694
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004695 /* Check to make sure the filter requested is writable ...
4696 */
4697 f = &adap->tids.ftid_tab[stid];
4698 ret = writable_filter(f);
4699 if (ret)
4700 return ret;
4701
4702 /* Clear out any old resources being used by the filter before
4703 * we start constructing the new filter.
4704 */
4705 if (f->valid)
4706 clear_filter(adap, f);
4707
4708 /* Clear out filter specifications */
4709 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4710 f->fs.val.lport = cpu_to_be16(sport);
4711 f->fs.mask.lport = ~0;
4712 val = (u8 *)&sip;
Vipul Pandya793dad92012-12-10 09:30:56 +00004713 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004714 for (i = 0; i < 4; i++) {
4715 f->fs.val.lip[i] = val[i];
4716 f->fs.mask.lip[i] = ~0;
4717 }
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05304718 if (adap->params.tp.vlan_pri_map & F_PORT) {
Vipul Pandya793dad92012-12-10 09:30:56 +00004719 f->fs.val.iport = port;
4720 f->fs.mask.iport = mask;
4721 }
4722 }
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004723
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05304724 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
Kumar Sanghvi7c89e552013-12-18 16:38:20 +05304725 f->fs.val.proto = IPPROTO_TCP;
4726 f->fs.mask.proto = ~0;
4727 }
4728
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004729 f->fs.dirsteer = 1;
4730 f->fs.iq = queue;
4731 /* Mark filter as locked */
4732 f->locked = 1;
4733 f->fs.rpttid = 1;
4734
4735 ret = set_filter_wr(adap, stid);
4736 if (ret) {
4737 clear_filter(adap, f);
4738 return ret;
4739 }
4740
4741 return 0;
4742}
4743EXPORT_SYMBOL(cxgb4_create_server_filter);
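/* Usage sketch (illustrative only; "stid", "sport" and "queue" are
 * placeholder values owned by the calling ULD): steer SYNs for a listening
 * server, matching any local IP, to ingress queue "queue":
 *
 *	err = cxgb4_create_server_filter(dev, stid, 0, sport, 0,
 *					 queue, 0, 0);
 */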
4744
4745int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4746 unsigned int queue, bool ipv6)
4747{
4748 int ret;
4749 struct filter_entry *f;
4750 struct adapter *adap;
4751
4752 adap = netdev2adap(dev);
Vipul Pandya1cab7752012-12-10 09:30:55 +00004753
4754 /* Adjust stid to correct filter index */
Kumar Sanghvi470c60c2013-12-18 16:38:21 +05304755 stid -= adap->tids.sftid_base;
Vipul Pandya1cab7752012-12-10 09:30:55 +00004756 stid += adap->tids.nftids;
4757
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004758 f = &adap->tids.ftid_tab[stid];
4759 /* Unlock the filter */
4760 f->locked = 0;
4761
4762 ret = delete_filter(adap, stid);
4763 if (ret)
4764 return ret;
4765
4766 return 0;
4767}
4768EXPORT_SYMBOL(cxgb4_remove_server_filter);
4769
Dimitris Michailidisf5152c92010-07-07 16:11:25 +00004770static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4771 struct rtnl_link_stats64 *ns)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004772{
4773 struct port_stats stats;
4774 struct port_info *p = netdev_priv(dev);
4775 struct adapter *adapter = p->adapter;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004776
Gavin Shan9fe6cb52014-01-23 12:27:35 +08004777 /* Block retrieving statistics during EEH error
4778 * recovery. Otherwise, the recovery might fail
4779 * and the PCI device will be removed permanently
4780 */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004781 spin_lock(&adapter->stats_lock);
Gavin Shan9fe6cb52014-01-23 12:27:35 +08004782 if (!netif_device_present(dev)) {
4783 spin_unlock(&adapter->stats_lock);
4784 return ns;
4785 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004786 t4_get_port_stats(adapter, p->tx_chan, &stats);
4787 spin_unlock(&adapter->stats_lock);
4788
4789 ns->tx_bytes = stats.tx_octets;
4790 ns->tx_packets = stats.tx_frames;
4791 ns->rx_bytes = stats.rx_octets;
4792 ns->rx_packets = stats.rx_frames;
4793 ns->multicast = stats.rx_mcast_frames;
4794
4795 /* detailed rx_errors */
4796 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4797 stats.rx_runt;
4798 ns->rx_over_errors = 0;
4799 ns->rx_crc_errors = stats.rx_fcs_err;
4800 ns->rx_frame_errors = stats.rx_symbol_err;
4801 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4802 stats.rx_ovflow2 + stats.rx_ovflow3 +
4803 stats.rx_trunc0 + stats.rx_trunc1 +
4804 stats.rx_trunc2 + stats.rx_trunc3;
4805 ns->rx_missed_errors = 0;
4806
4807 /* detailed tx_errors */
4808 ns->tx_aborted_errors = 0;
4809 ns->tx_carrier_errors = 0;
4810 ns->tx_fifo_errors = 0;
4811 ns->tx_heartbeat_errors = 0;
4812 ns->tx_window_errors = 0;
4813
4814 ns->tx_errors = stats.tx_error_frames;
4815 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4816 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4817 return ns;
4818}
4819
4820static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4821{
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004822 unsigned int mbox;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004823 int ret = 0, prtad, devad;
4824 struct port_info *pi = netdev_priv(dev);
4825 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4826
4827 switch (cmd) {
4828 case SIOCGMIIPHY:
4829 if (pi->mdio_addr < 0)
4830 return -EOPNOTSUPP;
4831 data->phy_id = pi->mdio_addr;
4832 break;
4833 case SIOCGMIIREG:
4834 case SIOCSMIIREG:
4835 if (mdio_phy_id_is_c45(data->phy_id)) {
4836 prtad = mdio_phy_id_prtad(data->phy_id);
4837 devad = mdio_phy_id_devad(data->phy_id);
4838 } else if (data->phy_id < 32) {
4839 prtad = data->phy_id;
4840 devad = 0;
4841 data->reg_num &= 0x1f;
4842 } else
4843 return -EINVAL;
4844
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004845 mbox = pi->adapter->fn;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004846 if (cmd == SIOCGMIIREG)
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004847 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004848 data->reg_num, &data->val_out);
4849 else
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004850 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004851 data->reg_num, data->val_in);
4852 break;
4853 default:
4854 return -EOPNOTSUPP;
4855 }
4856 return ret;
4857}
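/* Note on the phy_id handling above: userspace composes a clause-45 ID with
 * mdio_phy_id_c45(prtad, devad) from <linux/mdio.h>, which the code above
 * unpacks via mdio_phy_id_prtad()/mdio_phy_id_devad(); IDs below 32 are
 * treated as plain clause-22 port addresses.
 */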
4858
4859static void cxgb_set_rxmode(struct net_device *dev)
4860{
4861 /* unfortunately we can't return errors to the stack */
4862 set_rxmode(dev, -1, false);
4863}
4864
4865static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4866{
4867 int ret;
4868 struct port_info *pi = netdev_priv(dev);
4869
4870 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4871 return -EINVAL;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004872 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4873 -1, -1, -1, true);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004874 if (!ret)
4875 dev->mtu = new_mtu;
4876 return ret;
4877}
4878
4879static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4880{
4881 int ret;
4882 struct sockaddr *addr = p;
4883 struct port_info *pi = netdev_priv(dev);
4884
4885 if (!is_valid_ether_addr(addr->sa_data))
Danny Kukawka504f9b52012-02-21 02:07:49 +00004886 return -EADDRNOTAVAIL;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004887
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004888 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4889 pi->xact_addr_filt, addr->sa_data, true, true);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004890 if (ret < 0)
4891 return ret;
4892
4893 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4894 pi->xact_addr_filt = ret;
4895 return 0;
4896}
4897
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004898#ifdef CONFIG_NET_POLL_CONTROLLER
4899static void cxgb_netpoll(struct net_device *dev)
4900{
4901 struct port_info *pi = netdev_priv(dev);
4902 struct adapter *adap = pi->adapter;
4903
4904 if (adap->flags & USING_MSIX) {
4905 int i;
4906 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4907
4908 for (i = pi->nqsets; i; i--, rx++)
4909 t4_sge_intr_msix(0, &rx->rspq);
4910 } else
4911 t4_intr_handler(adap)(0, adap);
4912}
4913#endif
4914
4915static const struct net_device_ops cxgb4_netdev_ops = {
4916 .ndo_open = cxgb_open,
4917 .ndo_stop = cxgb_close,
4918 .ndo_start_xmit = t4_eth_xmit,
Anish Bhatt688848b2014-06-19 21:37:13 -07004919 .ndo_select_queue = cxgb_select_queue,
Dimitris Michailidis9be793b2010-06-18 10:05:31 +00004920 .ndo_get_stats64 = cxgb_get_stats,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004921 .ndo_set_rx_mode = cxgb_set_rxmode,
4922 .ndo_set_mac_address = cxgb_set_mac_addr,
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00004923 .ndo_set_features = cxgb_set_features,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004924 .ndo_validate_addr = eth_validate_addr,
4925 .ndo_do_ioctl = cxgb_ioctl,
4926 .ndo_change_mtu = cxgb_change_mtu,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004927#ifdef CONFIG_NET_POLL_CONTROLLER
4928 .ndo_poll_controller = cxgb_netpoll,
4929#endif
4930};
4931
4932void t4_fatal_err(struct adapter *adap)
4933{
4934 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4935 t4_intr_disable(adap);
4936 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4937}
4938
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304939/* Return the specified PCI-E Configuration Space register from our Physical
4940 * Function. We try first via a Firmware LDST Command since we prefer to let
4941 * the firmware own all of these registers, but if that fails we go for it
4942 * directly ourselves.
4943 */
4944static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
4945{
4946 struct fw_ldst_cmd ldst_cmd;
4947 u32 val;
4948 int ret;
4949
4950 /* Construct and send the Firmware LDST Command to retrieve the
4951 * specified PCI-E Configuration Space register.
4952 */
4953 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
4954 ldst_cmd.op_to_addrspace =
4955 htonl(FW_CMD_OP(FW_LDST_CMD) |
4956 FW_CMD_REQUEST |
4957 FW_CMD_READ |
4958 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
4959 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
4960 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
4961 ldst_cmd.u.pcie.ctrl_to_fn =
4962 (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
4963 ldst_cmd.u.pcie.r = reg;
4964 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
4965 &ldst_cmd);
4966
 4967 /* If the LDST Command succeeded, extract the returned register
 4968 * value. Otherwise read it directly ourselves.
4969 */
4970 if (ret == 0)
4971 val = ntohl(ldst_cmd.u.pcie.data[0]);
4972 else
4973 t4_hw_pci_read_cfg4(adap, reg, &val);
4974
4975 return val;
4976}
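/* setup_memwin() below, for example, uses this helper to recover the true
 * bus address of BAR0 on T4:
 *
 *	bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
 *	bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
 */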
4977
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004978static void setup_memwin(struct adapter *adap)
4979{
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304980 u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004981
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05304982 if (is_t4(adap->params.chip)) {
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304983 u32 bar0;
4984
4985 /* Truncation intentional: we only read the bottom 32-bits of
4986 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
4987 * mechanism to read BAR0 instead of using
4988 * pci_resource_start() because we could be operating from
4989 * within a Virtual Machine which is trapping our accesses to
4990 * our Configuration Space and we need to set up the PCI-E
4991 * Memory Window decoders with the actual addresses which will
4992 * be coming across the PCI-E link.
4993 */
4994 bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
4995 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
4996 adap->t4_bar0 = bar0;
4997
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00004998 mem_win0_base = bar0 + MEMWIN0_BASE;
4999 mem_win1_base = bar0 + MEMWIN1_BASE;
5000 mem_win2_base = bar0 + MEMWIN2_BASE;
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05305001 mem_win2_aperture = MEMWIN2_APERTURE;
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00005002 } else {
5003 /* For T5, only relative offset inside the PCIe BAR is passed */
5004 mem_win0_base = MEMWIN0_BASE;
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05305005 mem_win1_base = MEMWIN1_BASE;
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00005006 mem_win2_base = MEMWIN2_BASE_T5;
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05305007 mem_win2_aperture = MEMWIN2_APERTURE_T5;
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00005008 }
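	/* The WINDOW field encodes an aperture as log2(bytes) - 10, i.e. in
	 * 1KB units: a 64KB aperture, for instance, is encoded as
	 * ilog2(65536) - 10 = 6.  (Illustrative arithmetic only.)
	 */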
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005009 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00005010 mem_win0_base | BIR(0) |
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005011 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
5012 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00005013 mem_win1_base | BIR(0) |
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005014 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
5015 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00005016 mem_win2_base | BIR(0) |
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05305017 WINDOW(ilog2(mem_win2_aperture) - 10));
5018 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
Vipul Pandya636f9d32012-09-26 02:39:39 +00005019}
5020
5021static void setup_memwin_rdma(struct adapter *adap)
5022{
Dimitris Michailidis1ae970e2010-08-02 13:19:19 +00005023 if (adap->vres.ocq.size) {
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05305024 u32 start;
5025 unsigned int sz_kb;
Dimitris Michailidis1ae970e2010-08-02 13:19:19 +00005026
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05305027 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
5028 start &= PCI_BASE_ADDRESS_MEM_MASK;
5029 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
Dimitris Michailidis1ae970e2010-08-02 13:19:19 +00005030 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
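		/* Illustrative arithmetic: a 100KB OCQ region rounds up to
		 * 128KB, giving sz_kb = 128 and a window encoding of
		 * WINDOW(ilog2(128)) = WINDOW(7).
		 */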
5031 t4_write_reg(adap,
5032 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
5033 start | BIR(1) | WINDOW(ilog2(sz_kb)));
5034 t4_write_reg(adap,
5035 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
5036 adap->vres.ocq.start);
5037 t4_read_reg(adap,
5038 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
5039 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005040}
5041
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00005042static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
5043{
5044 u32 v;
5045 int ret;
5046
5047 /* get device capabilities */
5048 memset(c, 0, sizeof(*c));
5049 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5050 FW_CMD_REQUEST | FW_CMD_READ);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05305051 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00005052 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00005053 if (ret < 0)
5054 return ret;
5055
5056 /* select capabilities we'll be using */
5057 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5058 if (!vf_acls)
5059 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5060 else
5061 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5062 } else if (vf_acls) {
5063 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
 5064 return -EINVAL;	/* "ret" is 0 here, so return a real error */
5065 }
5066 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5067 FW_CMD_REQUEST | FW_CMD_WRITE);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00005068 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00005069 if (ret < 0)
5070 return ret;
5071
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00005072 ret = t4_config_glbl_rss(adap, adap->fn,
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00005073 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5074 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5075 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
5076 if (ret < 0)
5077 return ret;
5078
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00005079 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
5080 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00005081 if (ret < 0)
5082 return ret;
5083
5084 t4_sge_init(adap);
5085
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00005086 /* tweak some settings */
5087 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
5088 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
5089 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
5090 v = t4_read_reg(adap, TP_PIO_DATA);
5091 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00005092
Vipul Pandyadca4fae2012-12-10 09:30:53 +00005093 /* first 4 Tx modulation queues point to consecutive Tx channels */
5094 adap->params.tp.tx_modq_map = 0xE4;
5095 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
5096 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
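	/* 0xE4 is binary 11100100: read as four 2-bit fields starting at the
	 * LSB, modulation queues 0..3 map to Tx channels 0, 1, 2 and 3
	 * respectively (an illustrative decoding of the identity map).
	 */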
5097
5098 /* associate each Tx modulation queue with consecutive Tx channels */
5099 v = 0x84218421;
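	/* Read as eight 4-bit one-hot fields from the LSB, 0x84218421 assigns
	 * queues 0..7 to channels 0,1,2,3,0,1,2,3, i.e. consecutive channels,
	 * repeating.  (An illustrative decoding; the precise field layout is
	 * defined by the hardware.)
	 */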
5100 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5101 &v, 1, A_TP_TX_SCHED_HDR);
5102 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5103 &v, 1, A_TP_TX_SCHED_FIFO);
5104 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5105 &v, 1, A_TP_TX_SCHED_PCMD);
5106
5107#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
5108 if (is_offload(adap)) {
5109 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
5110 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5111 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5112 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5113 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5114 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
5115 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5116 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5117 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5118 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5119 }
5120
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00005121 /* get basic stuff going */
5122 return t4_early_init(adap, adap->fn);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00005123}
5124
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005125/*
5126 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
5127 */
5128#define MAX_ATIDS 8192U
5129
5130/*
5131 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005132 *
5133 * If the firmware we're dealing with has Configuration File support, then
5134 * we use that to perform all configuration
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005135 */
Vipul Pandya636f9d32012-09-26 02:39:39 +00005136
5137/*
5138 * Tweak configuration based on module parameters, etc. Most of these have
5139 * defaults assigned to them by Firmware Configuration Files (if we're using
5140 * them) but need to be explicitly set if we're using hard-coded
5141 * initialization. But even in the case of using Firmware Configuration
5142 * Files, we'd like to expose the ability to change these via module
5143 * parameters so these are essentially common tweaks/settings for
5144 * Configuration Files and hard-coded initialization ...
5145 */
5146static int adap_init0_tweaks(struct adapter *adapter)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005147{
Vipul Pandya636f9d32012-09-26 02:39:39 +00005148 /*
5149 * Fix up various Host-Dependent Parameters like Page Size, Cache
5150 * Line Size, etc. The firmware default is for a 4KB Page Size and
5151 * 64B Cache Line Size ...
5152 */
5153 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005154
Vipul Pandya636f9d32012-09-26 02:39:39 +00005155 /*
5156 * Process module parameters which affect early initialization.
5157 */
5158 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
5159 dev_err(&adapter->pdev->dev,
5160 "Ignoring illegal rx_dma_offset=%d, using 2\n",
5161 rx_dma_offset);
5162 rx_dma_offset = 2;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005163 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005164 t4_set_reg_field(adapter, SGE_CONTROL,
5165 PKTSHIFT_MASK,
5166 PKTSHIFT(rx_dma_offset));
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005167
Vipul Pandya636f9d32012-09-26 02:39:39 +00005168 /*
5169 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
5170 * adds the pseudo header itself.
5171 */
5172 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
5173 CSUM_HAS_PSEUDO_HDR, 0);
5174
5175 return 0;
5176}
5177
5178/*
5179 * Attempt to initialize the adapter via a Firmware Configuration File.
5180 */
5181static int adap_init0_config(struct adapter *adapter, int reset)
5182{
5183 struct fw_caps_config_cmd caps_cmd;
5184 const struct firmware *cf;
5185 unsigned long mtype = 0, maddr = 0;
5186 u32 finiver, finicsum, cfcsum;
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305187 int ret;
5188 int config_issued = 0;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00005189 char *fw_config_file, fw_config_file_path[256];
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305190 char *config_name = NULL;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005191
5192 /*
5193 * Reset device if necessary.
5194 */
5195 if (reset) {
5196 ret = t4_fw_reset(adapter, adapter->mbox,
5197 PIORSTMODE | PIORST);
5198 if (ret < 0)
5199 goto bye;
5200 }
5201
5202 /*
5203 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
5204 * then use that. Otherwise, use the configuration file stored
5205 * in the adapter flash ...
5206 */
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05305207 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00005208 case CHELSIO_T4:
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305209 fw_config_file = FW4_CFNAME;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00005210 break;
5211 case CHELSIO_T5:
5212 fw_config_file = FW5_CFNAME;
5213 break;
5214 default:
5215 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
5216 adapter->pdev->device);
5217 ret = -EINVAL;
5218 goto bye;
5219 }
5220
5221 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005222 if (ret < 0) {
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305223 config_name = "On FLASH";
Vipul Pandya636f9d32012-09-26 02:39:39 +00005224 mtype = FW_MEMTYPE_CF_FLASH;
5225 maddr = t4_flash_cfg_addr(adapter);
5226 } else {
5227 u32 params[7], val[7];
5228
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305229 sprintf(fw_config_file_path,
5230 "/lib/firmware/%s", fw_config_file);
5231 config_name = fw_config_file_path;
5232
Vipul Pandya636f9d32012-09-26 02:39:39 +00005233 if (cf->size >= FLASH_CFG_MAX_SIZE)
5234 ret = -ENOMEM;
5235 else {
5236 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5237 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5238 ret = t4_query_params(adapter, adapter->mbox,
5239 adapter->fn, 0, 1, params, val);
5240 if (ret == 0) {
5241 /*
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05305242 * For t4_memory_rw() below, addresses and
Vipul Pandya636f9d32012-09-26 02:39:39 +00005243 * sizes have to be in terms of multiples of 4
5244 * bytes. So, if the Configuration File isn't
5245 * a multiple of 4 bytes in length we'll have
5246 * to write that out separately since we can't
5247 * guarantee that the bytes following the
5248 * residual byte in the buffer returned by
5249 * request_firmware() are zeroed out ...
5250 */
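				/* E.g. (illustrative): a 1027-byte file goes
				 * out as a 1024-byte bulk write plus one
				 * final 4-byte word holding the 3 residual
				 * bytes with the last byte zeroed.
				 */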
5251 size_t resid = cf->size & 0x3;
5252 size_t size = cf->size & ~0x3;
5253 __be32 *data = (__be32 *)cf->data;
5254
5255 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
5256 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
5257
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05305258 spin_lock(&adapter->win0_lock);
5259 ret = t4_memory_rw(adapter, 0, mtype, maddr,
5260 size, data, T4_MEMORY_WRITE);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005261 if (ret == 0 && resid != 0) {
5262 union {
5263 __be32 word;
5264 char buf[4];
5265 } last;
5266 int i;
5267
5268 last.word = data[size >> 2];
5269 for (i = resid; i < 4; i++)
5270 last.buf[i] = 0;
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05305271 ret = t4_memory_rw(adapter, 0, mtype,
5272 maddr + size,
5273 4, &last.word,
5274 T4_MEMORY_WRITE);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005275 }
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05305276 spin_unlock(&adapter->win0_lock);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005277 }
5278 }
5279
5280 release_firmware(cf);
5281 if (ret)
5282 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005283 }
5284
Vipul Pandya636f9d32012-09-26 02:39:39 +00005285 /*
5286 * Issue a Capability Configuration command to the firmware to get it
5287 * to parse the Configuration File. We don't use t4_fw_config_file()
5288 * because we want the ability to modify various features after we've
5289 * processed the configuration file ...
5290 */
5291 memset(&caps_cmd, 0, sizeof(caps_cmd));
5292 caps_cmd.op_to_write =
5293 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5294 FW_CMD_REQUEST |
5295 FW_CMD_READ);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05305296 caps_cmd.cfvalid_to_len16 =
Vipul Pandya636f9d32012-09-26 02:39:39 +00005297 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
5298 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
5299 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
5300 FW_LEN16(caps_cmd));
5301 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5302 &caps_cmd);
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305303
5304 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
5305 * Configuration File in FLASH), our last gasp effort is to use the
5306 * Firmware Configuration File which is embedded in the firmware. A
5307 * very few early versions of the firmware didn't have one embedded
5308 * but we can ignore those.
5309 */
5310 if (ret == -ENOENT) {
5311 memset(&caps_cmd, 0, sizeof(caps_cmd));
5312 caps_cmd.op_to_write =
5313 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5314 FW_CMD_REQUEST |
5315 FW_CMD_READ);
5316 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5317 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
5318 sizeof(caps_cmd), &caps_cmd);
5319 config_name = "Firmware Default";
5320 }
5321
5322 config_issued = 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005323 if (ret < 0)
5324 goto bye;
5325
Vipul Pandya636f9d32012-09-26 02:39:39 +00005326 finiver = ntohl(caps_cmd.finiver);
5327 finicsum = ntohl(caps_cmd.finicsum);
5328 cfcsum = ntohl(caps_cmd.cfcsum);
5329 if (finicsum != cfcsum)
5330 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
5331 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
5332 finicsum, cfcsum);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005333
Vipul Pandya636f9d32012-09-26 02:39:39 +00005334 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00005335 * And now tell the firmware to use the configuration we just loaded.
5336 */
5337 caps_cmd.op_to_write =
5338 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5339 FW_CMD_REQUEST |
5340 FW_CMD_WRITE);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05305341 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
Vipul Pandya636f9d32012-09-26 02:39:39 +00005342 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5343 NULL);
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00005344 if (ret < 0)
5345 goto bye;
5346
Vipul Pandya636f9d32012-09-26 02:39:39 +00005347 /*
5348 * Tweak configuration based on system architecture, module
5349 * parameters, etc.
5350 */
5351 ret = adap_init0_tweaks(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005352 if (ret < 0)
5353 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005354
Vipul Pandya636f9d32012-09-26 02:39:39 +00005355 /*
5356 * And finally tell the firmware to initialize itself using the
5357 * parameters from the Configuration File.
5358 */
5359 ret = t4_fw_initialize(adapter, adapter->mbox);
5360 if (ret < 0)
5361 goto bye;
5362
5363 /*
5364 * Return successfully and note that we're operating with parameters
5365 * not supplied by the driver, rather than from hard-wired
 5366 * initialization constants buried in the driver.
5367 */
5368 adapter->flags |= USING_SOFT_PARAMS;
5369 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305370 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5371 config_name, finiver, cfcsum);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005372 return 0;
5373
5374 /*
5375 * Something bad happened. Return the error ... (If the "error"
5376 * is that there's no Configuration File on the adapter we don't
5377 * want to issue a warning since this is fairly common.)
5378 */
5379bye:
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305380 if (config_issued && ret != -ENOENT)
5381 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5382 config_name, -ret);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005383 return ret;
5384}
5385
5386/*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005387 * Attempt to initialize the adapter via hard-coded, driver supplied
5388 * parameters ...
5389 */
5390static int adap_init0_no_config(struct adapter *adapter, int reset)
5391{
5392 struct sge *s = &adapter->sge;
5393 struct fw_caps_config_cmd caps_cmd;
5394 u32 v;
5395 int i, ret;
5396
5397 /*
5398 * Reset device if necessary
5399 */
5400 if (reset) {
5401 ret = t4_fw_reset(adapter, adapter->mbox,
5402 PIORSTMODE | PIORST);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005403 if (ret < 0)
5404 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005405 }
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00005406
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005407 /*
5408 * Get device capabilities and select which we'll be using.
5409 */
5410 memset(&caps_cmd, 0, sizeof(caps_cmd));
5411 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5412 FW_CMD_REQUEST | FW_CMD_READ);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05305413 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005414 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5415 &caps_cmd);
5416 if (ret < 0)
5417 goto bye;
5418
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005419 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5420 if (!vf_acls)
5421 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5422 else
5423 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5424 } else if (vf_acls) {
5425 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
	ret = -EINVAL;	/* "ret" is 0 here, so return a real error */
 5426 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005427 }
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005428 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5429 FW_CMD_REQUEST | FW_CMD_WRITE);
5430 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5431 NULL);
5432 if (ret < 0)
5433 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005434
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005435 /*
5436 * Tweak configuration based on system architecture, module
5437 * parameters, etc.
5438 */
5439 ret = adap_init0_tweaks(adapter);
5440 if (ret < 0)
5441 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005442
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005443 /*
5444 * Select RSS Global Mode we want to use. We use "Basic Virtual"
5445 * mode which maps each Virtual Interface to its own section of
5446 * the RSS Table and we turn on all map and hash enables ...
5447 */
5448 adapter->flags |= RSS_TNLALLLOOKUP;
5449 ret = t4_config_glbl_rss(adapter, adapter->mbox,
5450 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5451 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5452 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
5453 ((adapter->flags & RSS_TNLALLLOOKUP) ?
5454 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
5455 if (ret < 0)
5456 goto bye;
5457
5458 /*
5459 * Set up our own fundamental resource provisioning ...
5460 */
5461 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
5462 PFRES_NEQ, PFRES_NETHCTRL,
5463 PFRES_NIQFLINT, PFRES_NIQ,
5464 PFRES_TC, PFRES_NVI,
5465 FW_PFVF_CMD_CMASK_MASK,
5466 pfvfres_pmask(adapter, adapter->fn, 0),
5467 PFRES_NEXACTF,
5468 PFRES_R_CAPS, PFRES_WX_CAPS);
5469 if (ret < 0)
5470 goto bye;
5471
5472 /*
5473 * Perform low level SGE initialization. We need to do this before we
5474 * send the firmware the INITIALIZE command because that will cause
5475 * any other PF Drivers which are waiting for the Master
5476 * Initialization to proceed forward.
5477 */
5478 for (i = 0; i < SGE_NTIMERS - 1; i++)
5479 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
5480 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
5481 s->counter_val[0] = 1;
5482 for (i = 1; i < SGE_NCOUNTERS; i++)
5483 s->counter_val[i] = min(intr_cnt[i - 1],
5484 THRESHOLD_0_GET(THRESHOLD_0_MASK));
5485 t4_sge_init(adapter);
Casey Leedom7ee9ff92010-06-25 12:11:46 +00005486
5487#ifdef CONFIG_PCI_IOV
5488 /*
5489 * Provision resource limits for Virtual Functions. We currently
5490 * grant them all the same static resource limits except for the Port
5491 * Access Rights Mask which we're assigning based on the PF. All of
5492 * the static provisioning stuff for both the PF and VF really needs
5493 * to be managed in a persistent manner for each device which the
5494 * firmware controls.
5495 */
5496 {
5497 int pf, vf;
5498
Santosh Rastapur7d6727c2013-03-14 05:08:56 +00005499 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
Casey Leedom7ee9ff92010-06-25 12:11:46 +00005500 if (num_vf[pf] <= 0)
5501 continue;
5502
5503 /* VF numbering starts at 1! */
5504 for (vf = 1; vf <= num_vf[pf]; vf++) {
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005505 ret = t4_cfg_pfvf(adapter, adapter->mbox,
5506 pf, vf,
Casey Leedom7ee9ff92010-06-25 12:11:46 +00005507 VFRES_NEQ, VFRES_NETHCTRL,
5508 VFRES_NIQFLINT, VFRES_NIQ,
5509 VFRES_TC, VFRES_NVI,
Vipul Pandya1f1e4952013-01-09 07:42:49 +00005510 FW_PFVF_CMD_CMASK_MASK,
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005511 pfvfres_pmask(
5512 adapter, pf, vf),
Casey Leedom7ee9ff92010-06-25 12:11:46 +00005513 VFRES_NEXACTF,
5514 VFRES_R_CAPS, VFRES_WX_CAPS);
5515 if (ret < 0)
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005516 dev_warn(adapter->pdev_dev,
5517 "failed to "\
Casey Leedom7ee9ff92010-06-25 12:11:46 +00005518 "provision pf/vf=%d/%d; "
5519 "err=%d\n", pf, vf, ret);
5520 }
5521 }
5522 }
5523#endif
5524
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005525 /*
5526 * Set up the default filter mode. Later we'll want to implement this
5527 * via a firmware command, etc. ... This needs to be done before the
 5528 * firmware initialization command ... If the selected set of fields
5529 * isn't equal to the default value, we'll need to make sure that the
5530 * field selections will fit in the 36-bit budget.
5531 */
5532 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
Vipul Pandya404d9e32012-10-08 02:59:43 +00005533 int j, bits = 0;
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005534
Vipul Pandya404d9e32012-10-08 02:59:43 +00005535 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5536 switch (tp_vlan_pri_map & (1 << j)) {
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005537 case 0:
5538 /* compressed filter field not enabled */
5539 break;
5540 case FCOE_MASK:
5541 bits += 1;
5542 break;
5543 case PORT_MASK:
5544 bits += 3;
5545 break;
5546 case VNIC_ID_MASK:
5547 bits += 17;
5548 break;
5549 case VLAN_MASK:
5550 bits += 17;
5551 break;
5552 case TOS_MASK:
5553 bits += 8;
5554 break;
5555 case PROTOCOL_MASK:
5556 bits += 8;
5557 break;
5558 case ETHERTYPE_MASK:
5559 bits += 16;
5560 break;
5561 case MACMATCH_MASK:
5562 bits += 9;
5563 break;
5564 case MPSHITTYPE_MASK:
5565 bits += 3;
5566 break;
5567 case FRAGMENTATION_MASK:
5568 bits += 1;
5569 break;
5570 }
5571
5572 if (bits > 36) {
5573 dev_err(adapter->pdev_dev,
5574 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5575 " using %#x\n", tp_vlan_pri_map, bits,
5576 TP_VLAN_PRI_MAP_DEFAULT);
5577 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5578 }
5579 }
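	/* Bit-budget example (illustrative): PORT_MASK (3) + VNIC_ID_MASK (17)
	 * + ETHERTYPE_MASK (16) uses exactly 36 bits and is accepted; adding
	 * PROTOCOL_MASK (8) would exceed the budget and revert the mode to
	 * TP_VLAN_PRI_MAP_DEFAULT.
	 */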
5580 v = tp_vlan_pri_map;
5581 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5582 &v, 1, TP_VLAN_PRI_MAP);
5583
5584 /*
 5585 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
5586 * to support any of the compressed filter fields above. Newer
5587 * versions of the firmware do this automatically but it doesn't hurt
5588 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5589 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5590 * since the firmware automatically turns this on and off when we have
5591 * a non-zero number of filters active (since it does have a
5592 * performance impact).
5593 */
5594 if (tp_vlan_pri_map)
5595 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5596 FIVETUPLELOOKUP_MASK,
5597 FIVETUPLELOOKUP_MASK);
5598
5599 /*
5600 * Tweak some settings.
5601 */
5602 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5603 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5604 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5605 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5606
5607 /*
5608 * Get basic stuff going by issuing the Firmware Initialize command.
5609 * Note that this _must_ be after all PFVF commands ...
5610 */
5611 ret = t4_fw_initialize(adapter, adapter->mbox);
5612 if (ret < 0)
5613 goto bye;
5614
5615 /*
5616 * Return successfully!
5617 */
5618 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5619 "driver parameters\n");
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005620 return 0;
5621
5622 /*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005623 * Something bad happened. Return the error ...
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005624 */
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005625bye:
5626 return ret;
5627}
5628
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305629static struct fw_info fw_info_array[] = {
5630 {
5631 .chip = CHELSIO_T4,
5632 .fs_name = FW4_CFNAME,
5633 .fw_mod_name = FW4_FNAME,
5634 .fw_hdr = {
5635 .chip = FW_HDR_CHIP_T4,
5636 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5637 .intfver_nic = FW_INTFVER(T4, NIC),
5638 .intfver_vnic = FW_INTFVER(T4, VNIC),
5639 .intfver_ri = FW_INTFVER(T4, RI),
5640 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5641 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5642 },
5643 }, {
5644 .chip = CHELSIO_T5,
5645 .fs_name = FW5_CFNAME,
5646 .fw_mod_name = FW5_FNAME,
5647 .fw_hdr = {
5648 .chip = FW_HDR_CHIP_T5,
5649 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5650 .intfver_nic = FW_INTFVER(T5, NIC),
5651 .intfver_vnic = FW_INTFVER(T5, VNIC),
5652 .intfver_ri = FW_INTFVER(T5, RI),
5653 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5654 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5655 },
5656 }
5657};
5658
5659static struct fw_info *find_fw_info(int chip)
5660{
5661 int i;
5662
5663 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5664 if (fw_info_array[i].chip == chip)
5665 return &fw_info_array[i];
5666 }
5667 return NULL;
5668}
5669
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005670/*
Vipul Pandya636f9d32012-09-26 02:39:39 +00005671 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005672 */
5673static int adap_init0(struct adapter *adap)
5674{
5675 int ret;
5676 u32 v, port_vec;
5677 enum dev_state state;
5678 u32 params[7], val[7];
Vipul Pandya9a4da2c2012-10-19 02:09:53 +00005679 struct fw_caps_config_cmd caps_cmd;
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05305680 int reset = 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005681
Vipul Pandya636f9d32012-09-26 02:39:39 +00005682 /*
5683 * Contact FW, advertising Master capability (and potentially forcing
5684 * ourselves as the Master PF if our module parameter force_init is
5685 * set).
5686 */
5687 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5688 force_init ? MASTER_MUST : MASTER_MAY,
5689 &state);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005690 if (ret < 0) {
5691 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5692 ret);
5693 return ret;
5694 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005695 if (ret == adap->mbox)
5696 adap->flags |= MASTER_PF;
5697 if (force_init && state == DEV_STATE_INIT)
5698 state = DEV_STATE_UNINIT;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005699
Vipul Pandya636f9d32012-09-26 02:39:39 +00005700 /*
5701 * If we're the Master PF Driver and the device is uninitialized,
5702 * then let's consider upgrading the firmware ... (We always want
5703 * to check the firmware version number in order to A. get it for
5704 * later reporting and B. to warn if the currently loaded firmware
5705 * is excessively mismatched relative to the driver.)
5706 */
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305707 t4_get_fw_version(adap, &adap->params.fw_vers);
5708 t4_get_tp_version(adap, &adap->params.tp_vers);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005709 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305710 struct fw_info *fw_info;
5711 struct fw_hdr *card_fw;
5712 const struct firmware *fw;
5713 const u8 *fw_data = NULL;
5714 unsigned int fw_size = 0;
5715
5716 /* This is the firmware whose headers the driver was compiled
5717 * against
5718 */
5719 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5720 if (fw_info == NULL) {
5721 dev_err(adap->pdev_dev,
5722 "unable to get firmware info for chip %d.\n",
5723 CHELSIO_CHIP_VERSION(adap->params.chip));
5724 return -EINVAL;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005725 }
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305726
5727 /* allocate memory to read the header of the firmware on the
5728 * card
5729 */
 5730 card_fw = t4_alloc_mem(sizeof(*card_fw));
		if (!card_fw) {	/* guard against allocation failure */
			ret = -ENOMEM;
			goto bye;
		}
5731
 5732 /* Get FW from /lib/firmware/ */
5733 ret = request_firmware(&fw, fw_info->fw_mod_name,
5734 adap->pdev_dev);
5735 if (ret < 0) {
5736 dev_err(adap->pdev_dev,
5737 "unable to load firmware image %s, error %d\n",
5738 fw_info->fw_mod_name, ret);
5739 } else {
5740 fw_data = fw->data;
5741 fw_size = fw->size;
5742 }
5743
5744 /* upgrade FW logic */
5745 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5746 state, &reset);
5747
5748 /* Cleaning up */
5749 if (fw != NULL)
5750 release_firmware(fw);
5751 t4_free_mem(card_fw);
5752
Vipul Pandya636f9d32012-09-26 02:39:39 +00005753 if (ret < 0)
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305754 goto bye;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005755 }
5756
5757 /*
5758 * Grab VPD parameters. This should be done after we establish a
5759 * connection to the firmware since some of the VPD parameters
5760 * (notably the Core Clock frequency) are retrieved via requests to
5761 * the firmware. On the other hand, we need these fairly early on
5762 * so we do this right after getting ahold of the firmware.
5763 */
5764 ret = get_vpd_params(adap, &adap->params.vpd);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005765 if (ret < 0)
5766 goto bye;
5767
Vipul Pandya636f9d32012-09-26 02:39:39 +00005768 /*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005769 * Find out what ports are available to us. Note that we need to do
5770 * this before calling adap_init0_no_config() since it needs nports
5771 * and portvec ...
Vipul Pandya636f9d32012-09-26 02:39:39 +00005772 */
5773 v =
5774 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5775 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5776 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5777 if (ret < 0)
5778 goto bye;
5779
5780 adap->params.nports = hweight32(port_vec);
5781 adap->params.portvec = port_vec;
5782
5783 /*
5784 * If the firmware is initialized already (and we're not forcing a
5785 * master initialization), note that we're living with existing
5786 * adapter parameters. Otherwise, it's time to try initializing the
5787 * adapter ...
5788 */
5789 if (state == DEV_STATE_INIT) {
5790 dev_info(adap->pdev_dev, "Coming up as %s: "\
5791 "Adapter already initialized\n",
5792 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5793 adap->flags |= USING_SOFT_PARAMS;
5794 } else {
5795 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5796 "Initializing adapter\n");
Vipul Pandya636f9d32012-09-26 02:39:39 +00005797
5798 /*
5799 * If the firmware doesn't support Configuration
 5800 * Files, warn the user.
5801 */
5802 if (ret < 0)
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005803 dev_warn(adap->pdev_dev, "Firmware doesn't support "
Vipul Pandya636f9d32012-09-26 02:39:39 +00005804 "configuration file.\n");
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005805 if (force_old_init)
5806 ret = adap_init0_no_config(adap, reset);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005807 else {
5808 /*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005809 * Find out whether we're dealing with a version of
5810 * the firmware which has configuration file support.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005811 */
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005812 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5813 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5814 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5815 params, val);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005816
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005817 /*
5818 * If the firmware doesn't support Configuration
5819 * Files, use the old Driver-based, hard-wired
5820 * initialization. Otherwise, try using the
5821 * Configuration File support and fall back to the
5822 * Driver-based initialization if there's no
5823 * Configuration File found.
5824 */
5825 if (ret < 0)
5826 ret = adap_init0_no_config(adap, reset);
5827 else {
5828 /*
5829 * The firmware provides us with a memory
5830 * buffer where we can load a Configuration
5831 * File from the host if we want to override
5832 * the Configuration File in flash.
5833 */
5834
5835 ret = adap_init0_config(adap, reset);
5836 if (ret == -ENOENT) {
5837 dev_info(adap->pdev_dev,
5838 "No Configuration File present "
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305839 "on adapter. Using hard-wired "
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005840 "configuration parameters.\n");
5841 ret = adap_init0_no_config(adap, reset);
5842 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005843 }
5844 }
5845 if (ret < 0) {
5846 dev_err(adap->pdev_dev,
5847 "could not initialize adapter, error %d\n",
5848 -ret);
5849 goto bye;
5850 }
5851 }
5852
5853 /*
5854 * If we're living with non-hard-coded parameters (either from a
5855 * Firmware Configuration File or values programmed by a different PF
5856 * Driver), give the SGE code a chance to pull in anything that it
5857 * needs ... Note that this must be called after we retrieve our VPD
5858 * parameters in order to know how to convert core ticks to seconds.
5859 */
5860 if (adap->flags & USING_SOFT_PARAMS) {
5861 ret = t4_sge_init(adap);
5862 if (ret < 0)
5863 goto bye;
5864 }
5865
Vipul Pandya9a4da2c2012-10-19 02:09:53 +00005866 if (is_bypass_device(adap->pdev->device))
5867 adap->params.bypass = 1;
5868
Vipul Pandya636f9d32012-09-26 02:39:39 +00005869 /*
5870 * Grab some of our basic fundamental operating parameters.
5871 */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005872#define FW_PARAM_DEV(param) \
5873 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
Vipul Pandya636f9d32012-09-26 02:39:39 +00005874 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005875
5876#define FW_PARAM_PFVF(param) \
Vipul Pandya636f9d32012-09-26 02:39:39 +00005877 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5878 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
5879 FW_PARAMS_PARAM_Y(0) | \
5880 FW_PARAMS_PARAM_Z(0)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005881
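/* These compose the 32-bit parameter identifiers exchanged with the firmware
 * via t4_query_params()/t4_set_params(): a mnemonic selecting the parameter
 * family (device-global vs. per-PF/VF) plus the parameter index and, for
 * PFVF parameters, zeroed Y/Z qualifiers.
 */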
Vipul Pandya636f9d32012-09-26 02:39:39 +00005882 params[0] = FW_PARAM_PFVF(EQ_START);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005883 params[1] = FW_PARAM_PFVF(L2T_START);
5884 params[2] = FW_PARAM_PFVF(L2T_END);
5885 params[3] = FW_PARAM_PFVF(FILTER_START);
5886 params[4] = FW_PARAM_PFVF(FILTER_END);
5887 params[5] = FW_PARAM_PFVF(IQFLINT_START);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005888 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005889 if (ret < 0)
5890 goto bye;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005891 adap->sge.egr_start = val[0];
5892 adap->l2t_start = val[1];
5893 adap->l2t_end = val[2];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005894 adap->tids.ftid_base = val[3];
5895 adap->tids.nftids = val[4] - val[3] + 1;
5896 adap->sge.ingr_start = val[5];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005897
Vipul Pandya636f9d32012-09-26 02:39:39 +00005898 /* query params related to active filter region */
5899 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5900 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5901 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
 5902 /* If the Active filter region is non-empty, we enable establishing
 5903 * offload connections through firmware work requests.
5904 */
5905 if ((val[0] != val[1]) && (ret >= 0)) {
5906 adap->flags |= FW_OFLD_CONN;
5907 adap->tids.aftid_base = val[0];
5908 adap->tids.aftid_end = val[1];
5909 }
5910
Vipul Pandyab407a4a2013-04-29 04:04:40 +00005911 /* If we're running on newer firmware, let it know that we're
5912 * prepared to deal with encapsulated CPL messages. Older
5913 * firmware won't understand this and we'll just get
5914 * unencapsulated messages ...
5915 */
5916 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5917 val[0] = 1;
5918 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5919
Vipul Pandya636f9d32012-09-26 02:39:39 +00005920 /*
Kumar Sanghvi1ac0f092014-02-18 17:56:12 +05305921 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5922 * capability. Earlier versions of the firmware didn't have the
5923 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5924 * permission to use ULPTX MEMWRITE DSGL.
5925 */
5926 if (is_t4(adap->params.chip)) {
5927 adap->params.ulptx_memwrite_dsgl = false;
5928 } else {
5929 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5930 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5931 1, params, val);
5932 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5933 }
5934
5935 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00005936 * Get device capabilities so we can determine what resources we need
5937 * to manage.
5938 */
5939 memset(&caps_cmd, 0, sizeof(caps_cmd));
Vipul Pandya9a4da2c2012-10-19 02:09:53 +00005940 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005941 FW_CMD_REQUEST | FW_CMD_READ);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05305942 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
Vipul Pandya636f9d32012-09-26 02:39:39 +00005943 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5944 &caps_cmd);
5945 if (ret < 0)
5946 goto bye;
5947
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005948 if (caps_cmd.ofldcaps) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005949 /* query offload-related parameters */
5950 params[0] = FW_PARAM_DEV(NTID);
5951 params[1] = FW_PARAM_PFVF(SERVER_START);
5952 params[2] = FW_PARAM_PFVF(SERVER_END);
5953 params[3] = FW_PARAM_PFVF(TDDP_START);
5954 params[4] = FW_PARAM_PFVF(TDDP_END);
5955 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005956 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5957 params, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005958 if (ret < 0)
5959 goto bye;
5960 adap->tids.ntids = val[0];
5961 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5962 adap->tids.stid_base = val[1];
5963 adap->tids.nstids = val[2] - val[1] + 1;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005964 /*
 5965 * Set up the server filter region. Divide the available filter
 5966 * region into two parts: regular filters get 1/3rd and server
 5967 * filters get the remaining 2/3rd. This is only enabled if the
 5968 * workaround path is enabled.
 5969 * 1. For regular filters.
 5970 * 2. Server filters: these are special filters used to redirect
 5971 * SYN packets to the offload queue.
5972 */
5973 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5974 adap->tids.sftid_base = adap->tids.ftid_base +
5975 DIV_ROUND_UP(adap->tids.nftids, 3);
5976 adap->tids.nsftids = adap->tids.nftids -
5977 DIV_ROUND_UP(adap->tids.nftids, 3);
5978 adap->tids.nftids = adap->tids.sftid_base -
5979 adap->tids.ftid_base;
5980 }
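		/* Worked example (illustrative): with nftids = 192 the split
		 * leaves DIV_ROUND_UP(192, 3) = 64 regular filters and
		 * 192 - 64 = 128 server filters, with sftid_base sitting
		 * 64 entries above ftid_base.
		 */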
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005981 adap->vres.ddp.start = val[3];
5982 adap->vres.ddp.size = val[4] - val[3] + 1;
5983 adap->params.ofldq_wr_cred = val[5];
Vipul Pandya636f9d32012-09-26 02:39:39 +00005984
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005985 adap->params.offload = 1;
5986 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005987 if (caps_cmd.rdmacaps) {
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005988 params[0] = FW_PARAM_PFVF(STAG_START);
5989 params[1] = FW_PARAM_PFVF(STAG_END);
5990 params[2] = FW_PARAM_PFVF(RQ_START);
5991 params[3] = FW_PARAM_PFVF(RQ_END);
5992 params[4] = FW_PARAM_PFVF(PBL_START);
5993 params[5] = FW_PARAM_PFVF(PBL_END);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005994 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5995 params, val);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005996 if (ret < 0)
5997 goto bye;
5998 adap->vres.stag.start = val[0];
5999 adap->vres.stag.size = val[1] - val[0] + 1;
6000 adap->vres.rq.start = val[2];
6001 adap->vres.rq.size = val[3] - val[2] + 1;
6002 adap->vres.pbl.start = val[4];
6003 adap->vres.pbl.size = val[5] - val[4] + 1;
6004
6005 params[0] = FW_PARAM_PFVF(SQRQ_START);
6006 params[1] = FW_PARAM_PFVF(SQRQ_END);
6007 params[2] = FW_PARAM_PFVF(CQ_START);
6008 params[3] = FW_PARAM_PFVF(CQ_END);
6009 params[4] = FW_PARAM_PFVF(OCQ_START);
6010 params[5] = FW_PARAM_PFVF(OCQ_END);
Hariprasad Shenai5c937dd2014-09-01 19:55:00 +05306011 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
6012 val);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006013 if (ret < 0)
6014 goto bye;
6015 adap->vres.qp.start = val[0];
6016 adap->vres.qp.size = val[1] - val[0] + 1;
6017 adap->vres.cq.start = val[2];
6018 adap->vres.cq.size = val[3] - val[2] + 1;
6019 adap->vres.ocq.start = val[4];
6020 adap->vres.ocq.size = val[5] - val[4] + 1;
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05306021
6022 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
6023 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
Hariprasad Shenai5c937dd2014-09-01 19:55:00 +05306024 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
6025 val);
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05306026 if (ret < 0) {
6027 adap->params.max_ordird_qp = 8;
6028 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
6029 ret = 0;
6030 } else {
6031 adap->params.max_ordird_qp = val[0];
6032 adap->params.max_ird_adapter = val[1];
6033 }
6034 dev_info(adap->pdev_dev,
6035 "max_ordird_qp %d max_ird_adapter %d\n",
6036 adap->params.max_ordird_qp,
6037 adap->params.max_ird_adapter);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006038 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00006039 if (caps_cmd.iscsicaps) {
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006040 params[0] = FW_PARAM_PFVF(ISCSI_START);
6041 params[1] = FW_PARAM_PFVF(ISCSI_END);
Vipul Pandya636f9d32012-09-26 02:39:39 +00006042 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
6043 params, val);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006044 if (ret < 0)
6045 goto bye;
6046 adap->vres.iscsi.start = val[0];
6047 adap->vres.iscsi.size = val[1] - val[0] + 1;
6048 }
6049#undef FW_PARAM_PFVF
6050#undef FW_PARAM_DEV
6051
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05306052 /* The MTU/MSS Table is initialized by now, so load their values. If
6053 * we're initializing the adapter, then we'll make any modifications
6054 * we want to the MTU/MSS Table and also initialize the congestion
6055 * parameters.
Vipul Pandya636f9d32012-09-26 02:39:39 +00006056 */
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006057 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05306058 if (state != DEV_STATE_INIT) {
6059 int i;
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006060
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05306061 /* The default MTU Table contains values 1492 and 1500.
6062 * However, for TCP, it's better to have two values which are
6063 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
6064 * This allows us to have a TCP Data Payload which is a
6065 * multiple of 8 regardless of what combination of TCP Options
6066 * are in use (always a multiple of 4 bytes) which is
6067 * important for performance reasons. For instance, if no
6068 * options are in use, then we have a 20-byte IP header and a
6069 * 20-byte TCP header. In this case, a 1500-byte MSS would
6070 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
6071 * which is not a multiple of 8. So using an MSS of 1488 in
6072 * this case results in a TCP Data Payload of 1448 bytes which
6073 * is a multiple of 8. On the other hand, if 12-byte TCP Time
6074 * Stamps have been negotiated, then an MTU of 1500 bytes
6075 * results in a TCP Data Payload of 1448 bytes which, as
6076 * above, is a multiple of 8 bytes ...
6077 */
6078 for (i = 0; i < NMTUS; i++)
6079 if (adap->params.mtus[i] == 1492) {
6080 adap->params.mtus[i] = 1488;
6081 break;
6082 }
6083
6084 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6085 adap->params.b_wnd);
6086 }
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05306087 t4_init_tp_params(adap);
Vipul Pandya636f9d32012-09-26 02:39:39 +00006088 adap->flags |= FW_OK;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006089 return 0;
6090
6091 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00006092 * Something bad happened.  If a command timed out or failed with EIO,
6093 * the FW is not operating within its spec or something catastrophic
6094 * happened to the HW/FW; stop issuing commands.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006095 */
Vipul Pandya636f9d32012-09-26 02:39:39 +00006096bye:
6097 if (ret != -ETIMEDOUT && ret != -EIO)
6098 t4_fw_bye(adap, adap->mbox);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006099 return ret;
6100}
6101
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006102/* EEH callbacks */
6103
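/*
 * PCI error recovery proceeds in up to three stages: error_detected()
 * quiesces the driver when the PCI channel reports an error,
 * slot_reset() re-initializes the adapter after the slot has been
 * reset, and resume() restarts the interfaces once recovery completes.
 */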
6104static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
6105 pci_channel_state_t state)
6106{
6107 int i;
6108 struct adapter *adap = pci_get_drvdata(pdev);
6109
6110 if (!adap)
6111 goto out;
6112
6113 rtnl_lock();
6114 adap->flags &= ~FW_OK;
6115 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
Gavin Shan9fe6cb52014-01-23 12:27:35 +08006116 spin_lock(&adap->stats_lock);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006117 for_each_port(adap, i) {
6118 struct net_device *dev = adap->port[i];
6119
6120 netif_device_detach(dev);
6121 netif_carrier_off(dev);
6122 }
Gavin Shan9fe6cb52014-01-23 12:27:35 +08006123 spin_unlock(&adap->stats_lock);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006124 if (adap->flags & FULL_INIT_DONE)
6125 cxgb_down(adap);
6126 rtnl_unlock();
Gavin Shan144be3d2014-01-23 12:27:34 +08006127 if ((adap->flags & DEV_ENABLED)) {
6128 pci_disable_device(pdev);
6129 adap->flags &= ~DEV_ENABLED;
6130 }
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006131out: return state == pci_channel_io_perm_failure ?
6132 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
6133}
6134
6135static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
6136{
6137 int i, ret;
6138 struct fw_caps_config_cmd c;
6139 struct adapter *adap = pci_get_drvdata(pdev);
6140
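	/* No adapter is bound to this PCI function; just restore its
	 * configuration space and report that we recovered.
	 */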
6141 if (!adap) {
6142 pci_restore_state(pdev);
6143 pci_save_state(pdev);
6144 return PCI_ERS_RESULT_RECOVERED;
6145 }
6146
Gavin Shan144be3d2014-01-23 12:27:34 +08006147 if (!(adap->flags & DEV_ENABLED)) {
6148 if (pci_enable_device(pdev)) {
6149			dev_err(&pdev->dev, "Cannot reenable PCI device after reset\n");
6151 return PCI_ERS_RESULT_DISCONNECT;
6152 }
6153 adap->flags |= DEV_ENABLED;
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006154 }
6155
6156 pci_set_master(pdev);
6157 pci_restore_state(pdev);
6158 pci_save_state(pdev);
6159 pci_cleanup_aer_uncorrect_error_status(pdev);
6160
Hariprasad Shenai8203b502014-10-09 05:48:47 +05306161 if (t4_wait_dev_ready(adap->regs) < 0)
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006162 return PCI_ERS_RESULT_DISCONNECT;
Thadeu Lima de Souza Cascardo777c2302013-05-03 08:11:04 +00006163 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006164 return PCI_ERS_RESULT_DISCONNECT;
6165 adap->flags |= FW_OK;
6166 if (adap_init1(adap, &c))
6167 return PCI_ERS_RESULT_DISCONNECT;
6168
6169 for_each_port(adap, i) {
6170 struct port_info *p = adap2pinfo(adap, i);
6171
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00006172 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
6173 NULL, NULL);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006174 if (ret < 0)
6175 return PCI_ERS_RESULT_DISCONNECT;
6176 p->viid = ret;
6177 p->xact_addr_filt = -1;
6178 }
6179
6180 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6181 adap->params.b_wnd);
Dimitris Michailidis1ae970e2010-08-02 13:19:19 +00006182 setup_memwin(adap);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006183 if (cxgb_up(adap))
6184 return PCI_ERS_RESULT_DISCONNECT;
6185 return PCI_ERS_RESULT_RECOVERED;
6186}
6187
6188static void eeh_resume(struct pci_dev *pdev)
6189{
6190 int i;
6191 struct adapter *adap = pci_get_drvdata(pdev);
6192
6193 if (!adap)
6194 return;
6195
6196 rtnl_lock();
6197 for_each_port(adap, i) {
6198 struct net_device *dev = adap->port[i];
6199
6200 if (netif_running(dev)) {
6201 link_start(dev);
6202 cxgb_set_rxmode(dev);
6203 }
6204 netif_device_attach(dev);
6205 }
6206 rtnl_unlock();
6207}
6208
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07006209static const struct pci_error_handlers cxgb4_eeh = {
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006210 .error_detected = eeh_err_detected,
6211 .slot_reset = eeh_slot_reset,
6212 .resume = eeh_resume,
6213};
6214
Kumar Sanghvi57d8b762014-02-18 17:56:10 +05306215static inline bool is_x_10g_port(const struct link_config *lc)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006216{
Kumar Sanghvi57d8b762014-02-18 17:56:10 +05306217 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
6218 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006219}
6220
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05306221static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
6222 unsigned int us, unsigned int cnt,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006223 unsigned int size, unsigned int iqe_size)
6224{
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05306225 q->adap = adap;
6226 set_rspq_intr_params(q, us, cnt);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006227 q->iqe_len = iqe_size;
6228 q->size = size;
6229}
6230
6231/*
6232 * Perform default configuration of DMA queues depending on the number and type
6233 * of ports we found and the number of available CPUs. Most settings can be
6234 * modified by the admin prior to actual use.
6235 */
Bill Pemberton91744942012-12-03 09:23:02 -05006236static void cfg_queues(struct adapter *adap)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006237{
6238 struct sge *s = &adap->sge;
Anish Bhatt688848b2014-06-19 21:37:13 -07006239 int i, n10g = 0, qidx = 0;
6240#ifndef CONFIG_CHELSIO_T4_DCB
6241 int q10g = 0;
6242#endif
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05306243 int ciq_size;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006244
6245 for_each_port(adap, i)
Kumar Sanghvi57d8b762014-02-18 17:56:10 +05306246 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
Anish Bhatt688848b2014-06-19 21:37:13 -07006247#ifdef CONFIG_CHELSIO_T4_DCB
6248	/* For Data Center Bridging we need to support up to 8 Traffic
6249	 * Priorities; each one is assigned its own TX Queue in order to
6250	 * prevent Head-Of-Line Blocking.
6251 */
6252 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
6253 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
6254 MAX_ETH_QSETS, adap->params.nports * 8);
6255 BUG_ON(1);
6256 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006257
Anish Bhatt688848b2014-06-19 21:37:13 -07006258 for_each_port(adap, i) {
6259 struct port_info *pi = adap2pinfo(adap, i);
6260
6261 pi->first_qset = qidx;
6262 pi->nqsets = 8;
6263 qidx += pi->nqsets;
6264 }
6265#else /* !CONFIG_CHELSIO_T4_DCB */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006266 /*
6267 * We default to 1 queue per non-10G port and up to # of cores queues
6268 * per 10G port.
6269 */
6270 if (n10g)
6271 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
Yuval Mintz5952dde2012-07-01 03:18:55 +00006272 if (q10g > netif_get_num_default_rss_queues())
6273 q10g = netif_get_num_default_rss_queues();
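	/*
	 * E.g. assuming MAX_ETH_QSETS is 32, a 2-port 10G adapter on an
	 * 8-core machine gets min(32 / 2, 8) = 8 queue sets per 10G port.
	 */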
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006274
6275 for_each_port(adap, i) {
6276 struct port_info *pi = adap2pinfo(adap, i);
6277
6278 pi->first_qset = qidx;
Kumar Sanghvi57d8b762014-02-18 17:56:10 +05306279 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006280 qidx += pi->nqsets;
6281 }
Anish Bhatt688848b2014-06-19 21:37:13 -07006282#endif /* !CONFIG_CHELSIO_T4_DCB */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006283
6284 s->ethqsets = qidx;
6285 s->max_ethqsets = qidx; /* MSI-X may lower it later */
6286
6287 if (is_offload(adap)) {
6288 /*
6289 * For offload we use 1 queue/channel if all ports are up to 1G,
6290		 * otherwise we divide the available queues amongst the
6291		 * channels, capped by the number of available cores.
6292 */
6293 if (n10g) {
6294 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
6295 num_online_cpus());
6296 s->ofldqsets = roundup(i, adap->params.nports);
6297		} else {
6298			s->ofldqsets = adap->params.nports;
		}
6299 /* For RDMA one Rx queue per channel suffices */
6300 s->rdmaqs = adap->params.nports;
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05306301 s->rdmaciqs = adap->params.nports;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006302 }
6303
6304 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
6305 struct sge_eth_rxq *r = &s->ethrxq[i];
6306
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05306307 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006308 r->fl.size = 72;
6309 }
6310
6311 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
6312 s->ethtxq[i].q.size = 1024;
6313
6314 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
6315 s->ctrlq[i].q.size = 512;
6316
6317 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
6318 s->ofldtxq[i].q.size = 1024;
6319
6320 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
6321 struct sge_ofld_rxq *r = &s->ofldrxq[i];
6322
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05306323 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006324 r->rspq.uld = CXGB4_ULD_ISCSI;
6325 r->fl.size = 72;
6326 }
6327
6328 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
6329 struct sge_ofld_rxq *r = &s->rdmarxq[i];
6330
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05306331 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006332 r->rspq.uld = CXGB4_ULD_RDMA;
6333 r->fl.size = 72;
6334 }
6335
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05306336 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
6337 if (ciq_size > SGE_MAX_IQ_SIZE) {
6338 CH_WARN(adap, "CIQ size too small for available IQs\n");
6339 ciq_size = SGE_MAX_IQ_SIZE;
6340 }
6341
6342 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
6343 struct sge_ofld_rxq *r = &s->rdmaciq[i];
6344
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05306345 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05306346 r->rspq.uld = CXGB4_ULD_RDMA;
6347 }
6348
Hariprasad Shenaic887ad02014-06-06 21:40:45 +05306349 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
6350 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006351}
6352
6353/*
6354 * Reduce the number of Ethernet queues across all ports to at most n.
6355 * n is at least the number of ports, so every port keeps at least one queue.
6356 */
Bill Pemberton91744942012-12-03 09:23:02 -05006357static void reduce_ethqs(struct adapter *adap, int n)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006358{
6359 int i;
6360 struct port_info *pi;
6361
6362 while (n < adap->sge.ethqsets)
6363 for_each_port(adap, i) {
6364 pi = adap2pinfo(adap, i);
6365 if (pi->nqsets > 1) {
6366 pi->nqsets--;
6367 adap->sge.ethqsets--;
6368 if (adap->sge.ethqsets <= n)
6369 break;
6370 }
6371 }
6372
6373 n = 0;
6374 for_each_port(adap, i) {
6375 pi = adap2pinfo(adap, i);
6376 pi->first_qset = n;
6377 n += pi->nqsets;
6378 }
6379}
6380
6381/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
6382#define EXTRA_VECS 2
6383
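/*
 * Request between "need" and "want" MSI-X vectors and distribute
 * whatever pci_enable_msix_range() actually grants across the NIC
 * and offload queue groups.
 */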
Bill Pemberton91744942012-12-03 09:23:02 -05006384static int enable_msix(struct adapter *adap)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006385{
6386 int ofld_need = 0;
Alexander Gordeevc32ad222014-02-18 11:07:59 +01006387 int i, want, need;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006388 struct sge *s = &adap->sge;
6389 unsigned int nchan = adap->params.nports;
6390 struct msix_entry entries[MAX_INGQ + 1];
6391
6392 for (i = 0; i < ARRAY_SIZE(entries); ++i)
6393 entries[i].entry = i;
6394
6395 want = s->max_ethqsets + EXTRA_VECS;
6396 if (is_offload(adap)) {
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05306397 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006398 /* need nchan for each possible ULD */
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05306399 ofld_need = 3 * nchan;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006400 }
Anish Bhatt688848b2014-06-19 21:37:13 -07006401#ifdef CONFIG_CHELSIO_T4_DCB
6402 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
6403 * each port.
6404 */
6405 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
6406#else
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006407 need = adap->params.nports + EXTRA_VECS + ofld_need;
Anish Bhatt688848b2014-06-19 21:37:13 -07006408#endif
Alexander Gordeevc32ad222014-02-18 11:07:59 +01006409 want = pci_enable_msix_range(adap->pdev, entries, need, want);
6410 if (want < 0)
6411 return want;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006412
Alexander Gordeevc32ad222014-02-18 11:07:59 +01006413 /*
6414 * Distribute available vectors to the various queue groups.
6415 * Every group gets its minimum requirement and NIC gets top
6416 * priority for leftovers.
6417 */
6418 i = want - EXTRA_VECS - ofld_need;
6419 if (i < s->max_ethqsets) {
6420 s->max_ethqsets = i;
6421 if (i < s->ethqsets)
6422 reduce_ethqs(adap, i);
6423 }
6424 if (is_offload(adap)) {
6425 i = want - EXTRA_VECS - s->max_ethqsets;
6426 i -= ofld_need - nchan;
6427 s->ofldqsets = (i / nchan) * nchan; /* round down */
6428 }
6429 for (i = 0; i < want; ++i)
6430 adap->msix_info[i].vec = entries[i].vector;
6431
6432 return 0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006433}
6434
6435#undef EXTRA_VECS
6436
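/*
 * Allocate each port's RSS indirection table and fill it with the
 * default round-robin mapping onto that port's Rx queue sets.
 */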
Bill Pemberton91744942012-12-03 09:23:02 -05006437static int init_rss(struct adapter *adap)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00006438{
6439 unsigned int i, j;
6440
6441 for_each_port(adap, i) {
6442 struct port_info *pi = adap2pinfo(adap, i);
6443
6444 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6445 if (!pi->rss)
6446 return -ENOMEM;
6447 for (j = 0; j < pi->rss_size; j++)
Ben Hutchings278bc422011-12-15 13:56:49 +00006448 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
Dimitris Michailidis671b0062010-07-11 12:01:17 +00006449 }
6450 return 0;
6451}
6452
Bill Pemberton91744942012-12-03 09:23:02 -05006453static void print_port_info(const struct net_device *dev)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006454{
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006455 char buf[80];
Dimitris Michailidis118969e2010-12-14 21:36:48 +00006456 char *bufp = buf;
Dimitris Michailidisf1a051b2010-05-10 15:58:08 +00006457 const char *spd = "";
Dimitris Michailidis118969e2010-12-14 21:36:48 +00006458 const struct port_info *pi = netdev_priv(dev);
6459 const struct adapter *adap = pi->adapter;
Dimitris Michailidisf1a051b2010-05-10 15:58:08 +00006460
6461 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
6462 spd = " 2.5 GT/s";
6463 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
6464 spd = " 5 GT/s";
Roland Dreierd2e752d2014-04-28 17:36:20 -07006465 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
6466 spd = " 8 GT/s";
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006467
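	/* Build a "100/1000/10G/40GBASE-<type>" string of the port's
	 * supported link speeds.
	 */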
Dimitris Michailidis118969e2010-12-14 21:36:48 +00006468 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
6469 bufp += sprintf(bufp, "100/");
6470 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
6471 bufp += sprintf(bufp, "1000/");
6472 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
6473 bufp += sprintf(bufp, "10G/");
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05306474 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
6475 bufp += sprintf(bufp, "40G/");
Dimitris Michailidis118969e2010-12-14 21:36:48 +00006476 if (bufp != buf)
6477 --bufp;
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05306478 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006479
Dimitris Michailidis118969e2010-12-14 21:36:48 +00006480 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
Santosh Rastapur0a57a532013-03-14 05:08:49 +00006481 adap->params.vpd.id,
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05306482 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
Dimitris Michailidis118969e2010-12-14 21:36:48 +00006483 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
6484 (adap->flags & USING_MSIX) ? " MSI-X" :
6485 (adap->flags & USING_MSI) ? " MSI" : "");
Kumar Sanghvia94cd702014-02-18 17:56:09 +05306486 netdev_info(dev, "S/N: %s, P/N: %s\n",
6487 adap->params.vpd.sn, adap->params.vpd.pn);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006488}
6489
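/* Set the Relaxed Ordering Enable bit in the PCIe Device Control register. */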
Bill Pemberton91744942012-12-03 09:23:02 -05006490static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
Dimitris Michailidisef306b52010-12-14 21:36:44 +00006491{
Jiang Liue5c8ae52012-08-20 13:53:19 -06006492 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
Dimitris Michailidisef306b52010-12-14 21:36:44 +00006493}
6494
Dimitris Michailidis06546392010-07-11 12:01:16 +00006495/*
6496 * Free the following resources:
6497 * - memory used for tables
6498 * - MSI/MSI-X
6499 * - net devices
6500 * - resources FW is holding for us
6501 */
6502static void free_some_resources(struct adapter *adapter)
6503{
6504 unsigned int i;
6505
6506 t4_free_mem(adapter->l2t);
6507 t4_free_mem(adapter->tids.tid_tab);
6508 disable_msi(adapter);
6509
6510 for_each_port(adapter, i)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00006511 if (adapter->port[i]) {
6512 kfree(adap2pinfo(adapter, i)->rss);
Dimitris Michailidis06546392010-07-11 12:01:16 +00006513 free_netdev(adapter->port[i]);
Dimitris Michailidis671b0062010-07-11 12:01:17 +00006514 }
Dimitris Michailidis06546392010-07-11 12:01:16 +00006515 if (adapter->flags & FW_OK)
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00006516 t4_fw_bye(adapter, adapter->fn);
Dimitris Michailidis06546392010-07-11 12:01:16 +00006517}
6518
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00006519#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
Dimitris Michailidis35d35682010-08-02 13:19:20 +00006520#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006521 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006522#define SEGMENT_SIZE 128
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006523
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00006524static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006525{
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006526 int func, i, err, s_qpp, qpp, num_seg;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006527 struct port_info *pi;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006528 bool highdma = false;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006529 struct adapter *adapter = NULL;
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306530 void __iomem *regs;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006531
6532 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
6533
6534 err = pci_request_regions(pdev, KBUILD_MODNAME);
6535 if (err) {
6536 /* Just info, some other driver may have claimed the device. */
6537 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6538 return err;
6539 }
6540
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006541 err = pci_enable_device(pdev);
6542 if (err) {
6543 dev_err(&pdev->dev, "cannot enable PCI device\n");
6544 goto out_release_regions;
6545 }
6546
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306547 regs = pci_ioremap_bar(pdev, 0);
6548 if (!regs) {
6549 dev_err(&pdev->dev, "cannot map device registers\n");
6550 err = -ENOMEM;
6551 goto out_disable_device;
6552 }
6553
Hariprasad Shenai8203b502014-10-09 05:48:47 +05306554 err = t4_wait_dev_ready(regs);
6555 if (err < 0)
6556 goto out_unmap_bar0;
6557
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306558 /* We control everything through one PF */
6559 func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
6560 if (func != ent->driver_data) {
6561 iounmap(regs);
6562 pci_disable_device(pdev);
6563 pci_save_state(pdev); /* to restore SR-IOV later */
6564 goto sriov;
6565 }
6566
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006567 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006568 highdma = true;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006569 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6570 if (err) {
6571			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for coherent allocations\n");
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306573 goto out_unmap_bar0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006574 }
6575 } else {
6576 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6577 if (err) {
6578 dev_err(&pdev->dev, "no usable DMA configuration\n");
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306579 goto out_unmap_bar0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006580 }
6581 }
6582
6583 pci_enable_pcie_error_reporting(pdev);
Dimitris Michailidisef306b52010-12-14 21:36:44 +00006584 enable_pcie_relaxed_ordering(pdev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006585 pci_set_master(pdev);
6586 pci_save_state(pdev);
6587
6588 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6589 if (!adapter) {
6590 err = -ENOMEM;
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306591 goto out_unmap_bar0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006592 }
6593
Anish Bhatt29aaee62014-08-20 13:44:06 -07006594 adapter->workq = create_singlethread_workqueue("cxgb4");
6595 if (!adapter->workq) {
6596 err = -ENOMEM;
6597 goto out_free_adapter;
6598 }
6599
Gavin Shan144be3d2014-01-23 12:27:34 +08006600 /* PCI device has been enabled */
6601 adapter->flags |= DEV_ENABLED;
6602
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306603 adapter->regs = regs;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006604 adapter->pdev = pdev;
6605 adapter->pdev_dev = &pdev->dev;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05306606 adapter->mbox = func;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00006607 adapter->fn = func;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006608 adapter->msg_enable = dflt_msg_enable;
6609 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6610
6611 spin_lock_init(&adapter->stats_lock);
6612 spin_lock_init(&adapter->tid_release_lock);
6613
6614 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
Vipul Pandya881806b2012-05-18 15:29:24 +05306615 INIT_WORK(&adapter->db_full_task, process_db_full);
6616 INIT_WORK(&adapter->db_drop_task, process_db_drop);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006617
6618 err = t4_prep_adapter(adapter);
6619 if (err)
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306620 goto out_free_adapter;
6621
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05306623 if (!is_t4(adapter->params.chip)) {
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006624 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
6625 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
6626 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
6627 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6628
6629		/* Each segment is 128 B in size.  Write coalescing is enabled
6630		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value
6631		 * for the queue is less than the number of segments that can
6632		 * be accommodated in a page.
6633 */
6634 if (qpp > num_seg) {
6635 dev_err(&pdev->dev,
6636 "Incorrect number of egress queues per page\n");
6637 err = -EINVAL;
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306638 goto out_free_adapter;
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006639 }
6640 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6641 pci_resource_len(pdev, 2));
6642 if (!adapter->bar2) {
6643 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6644 err = -ENOMEM;
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306645 goto out_free_adapter;
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006646 }
6647 }
6648
Vipul Pandya636f9d32012-09-26 02:39:39 +00006649 setup_memwin(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006650 err = adap_init0(adapter);
Vipul Pandya636f9d32012-09-26 02:39:39 +00006651 setup_memwin_rdma(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006652 if (err)
6653 goto out_unmap_bar;
6654
6655 for_each_port(adapter, i) {
6656 struct net_device *netdev;
6657
6658 netdev = alloc_etherdev_mq(sizeof(struct port_info),
6659 MAX_ETH_QSETS);
6660 if (!netdev) {
6661 err = -ENOMEM;
6662 goto out_free_dev;
6663 }
6664
6665 SET_NETDEV_DEV(netdev, &pdev->dev);
6666
6667 adapter->port[i] = netdev;
6668 pi = netdev_priv(netdev);
6669 pi->adapter = adapter;
6670 pi->xact_addr_filt = -1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006671 pi->port_id = i;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006672 netdev->irq = pdev->irq;
6673
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00006674 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6675 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6676 NETIF_F_RXCSUM | NETIF_F_RXHASH |
Patrick McHardyf6469682013-04-19 02:04:27 +00006677 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006678 if (highdma)
6679 netdev->hw_features |= NETIF_F_HIGHDMA;
6680 netdev->features |= netdev->hw_features;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006681 netdev->vlan_features = netdev->features & VLAN_FEAT;
6682
Jiri Pirko01789342011-08-16 06:29:00 +00006683 netdev->priv_flags |= IFF_UNICAST_FLT;
6684
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006685 netdev->netdev_ops = &cxgb4_netdev_ops;
Anish Bhatt688848b2014-06-19 21:37:13 -07006686#ifdef CONFIG_CHELSIO_T4_DCB
6687 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6688 cxgb4_dcb_state_init(netdev);
6689#endif
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00006690 netdev->ethtool_ops = &cxgb_ethtool_ops;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006691 }
6692
6693 pci_set_drvdata(pdev, adapter);
6694
6695 if (adapter->flags & FW_OK) {
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00006696 err = t4_port_init(adapter, func, func, 0);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006697 if (err)
6698 goto out_free_dev;
6699 }
6700
6701 /*
6702 * Configure queues and allocate tables now, they can be needed as
6703 * soon as the first register_netdev completes.
6704 */
6705 cfg_queues(adapter);
6706
6707 adapter->l2t = t4_init_l2t();
6708 if (!adapter->l2t) {
6709 /* We tolerate a lack of L2T, giving up some functionality */
6710 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6711 adapter->params.offload = 0;
6712 }
6713
6714 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6715		dev_warn(&pdev->dev, "could not allocate TID table, continuing\n");
6717 adapter->params.offload = 0;
6718 }
6719
Dimitris Michailidisf7cabcd2010-07-11 12:01:15 +00006720 /* See what interrupts we'll be using */
6721 if (msi > 1 && enable_msix(adapter) == 0)
6722 adapter->flags |= USING_MSIX;
6723 else if (msi > 0 && pci_enable_msi(pdev) == 0)
6724 adapter->flags |= USING_MSI;
6725
Dimitris Michailidis671b0062010-07-11 12:01:17 +00006726 err = init_rss(adapter);
6727 if (err)
6728 goto out_free_dev;
6729
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006730 /*
6731 * The card is now ready to go. If any errors occur during device
6732 * registration we do not fail the whole card but rather proceed only
6733 * with the ports we manage to register successfully. However we must
6734 * register at least one net device.
6735 */
6736 for_each_port(adapter, i) {
Dimitris Michailidisa57cabe2010-12-14 21:36:46 +00006737 pi = adap2pinfo(adapter, i);
6738 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6739 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6740
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006741 err = register_netdev(adapter->port[i]);
6742 if (err)
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +00006743 break;
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +00006744 adapter->chan_map[pi->tx_chan] = i;
6745 print_port_info(adapter->port[i]);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006746 }
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +00006747 if (i == 0) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006748 dev_err(&pdev->dev, "could not register any net devices\n");
6749 goto out_free_dev;
6750 }
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +00006751 if (err) {
6752 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6753 err = 0;
Joe Perches6403eab2011-06-03 11:51:20 +00006754 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006755
6756 if (cxgb4_debugfs_root) {
6757 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6758 cxgb4_debugfs_root);
6759 setup_debugfs(adapter);
6760 }
6761
David S. Miller88c51002011-10-07 13:38:43 -04006762 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6763 pdev->needs_freset = 1;
6764
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006765 if (is_offload(adapter))
6766 attach_ulds(adapter);
6767
Hariprasad Shenai8e1e6052014-08-06 17:10:59 +05306768sriov:
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006769#ifdef CONFIG_PCI_IOV
Santosh Rastapur7d6727c2013-03-14 05:08:56 +00006770 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006771 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6772 dev_info(&pdev->dev,
6773 "instantiated %u virtual functions\n",
6774 num_vf[func]);
6775#endif
6776 return 0;
6777
6778 out_free_dev:
Dimitris Michailidis06546392010-07-11 12:01:16 +00006779 free_some_resources(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006780 out_unmap_bar:
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05306781 if (!is_t4(adapter->params.chip))
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006782 iounmap(adapter->bar2);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006783 out_free_adapter:
Anish Bhatt29aaee62014-08-20 13:44:06 -07006784 if (adapter->workq)
6785 destroy_workqueue(adapter->workq);
6786
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006787 kfree(adapter);
Hariprasad Shenaid6ce2622014-09-16 02:58:46 +05306788 out_unmap_bar0:
6789 iounmap(regs);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006790 out_disable_device:
6791 pci_disable_pcie_error_reporting(pdev);
6792 pci_disable_device(pdev);
6793 out_release_regions:
6794 pci_release_regions(pdev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006795 return err;
6796}
6797
Bill Pemberton91744942012-12-03 09:23:02 -05006798static void remove_one(struct pci_dev *pdev)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006799{
6800 struct adapter *adapter = pci_get_drvdata(pdev);
6801
Vipul Pandya636f9d32012-09-26 02:39:39 +00006802#ifdef CONFIG_PCI_IOV
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006803 pci_disable_sriov(pdev);
6804
Vipul Pandya636f9d32012-09-26 02:39:39 +00006805#endif
6806
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006807 if (adapter) {
6808 int i;
6809
Anish Bhatt29aaee62014-08-20 13:44:06 -07006810 /* Tear down per-adapter Work Queue first since it can contain
6811 * references to our adapter data structure.
6812 */
6813 destroy_workqueue(adapter->workq);
6814
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006815 if (is_offload(adapter))
6816 detach_ulds(adapter);
6817
6818 for_each_port(adapter, i)
Dimitris Michailidis8f3a7672010-12-14 21:36:52 +00006819 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006820 unregister_netdev(adapter->port[i]);
6821
Fabian Frederick9f16dc22014-06-27 22:51:52 +02006822 debugfs_remove_recursive(adapter->debugfs_root);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006823
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00006824 /* If we allocated filters, free up state associated with any
6825 * valid filters ...
6826 */
6827 if (adapter->tids.ftid_tab) {
6828 struct filter_entry *f = &adapter->tids.ftid_tab[0];
Vipul Pandyadca4fae2012-12-10 09:30:53 +00006829 for (i = 0; i < (adapter->tids.nftids +
6830 adapter->tids.nsftids); i++, f++)
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00006831 if (f->valid)
6832 clear_filter(adapter, f);
6833 }
6834
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00006835 if (adapter->flags & FULL_INIT_DONE)
6836 cxgb_down(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006837
Dimitris Michailidis06546392010-07-11 12:01:16 +00006838 free_some_resources(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006839 iounmap(adapter->regs);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05306840 if (!is_t4(adapter->params.chip))
Santosh Rastapur22adfe02013-03-14 05:08:51 +00006841 iounmap(adapter->bar2);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006842 pci_disable_pcie_error_reporting(pdev);
Gavin Shan144be3d2014-01-23 12:27:34 +08006843 if ((adapter->flags & DEV_ENABLED)) {
6844 pci_disable_device(pdev);
6845 adapter->flags &= ~DEV_ENABLED;
6846 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006847 pci_release_regions(pdev);
Li RongQingee9a33b2014-06-20 17:32:36 +08006848 synchronize_rcu();
Gavin Shan8b662fe2014-01-24 17:12:03 +08006849 kfree(adapter);
Dimitris Michailidisa069ec92010-09-30 09:17:12 +00006850	} else {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006851		pci_release_regions(pdev);
	}
6852}
6853
6854static struct pci_driver cxgb4_driver = {
6855 .name = KBUILD_MODNAME,
6856 .id_table = cxgb4_pci_tbl,
6857 .probe = init_one,
Bill Pemberton91744942012-12-03 09:23:02 -05006858 .remove = remove_one,
Thadeu Lima de Souza Cascardo687d7052014-02-24 17:04:52 -03006859 .shutdown = remove_one,
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006860 .err_handler = &cxgb4_eeh,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006861};
6862
6863static int __init cxgb4_init_module(void)
6864{
6865 int ret;
6866
6867 /* Debugfs support is optional, just warn if this fails */
6868 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6869 if (!cxgb4_debugfs_root)
Joe Perches428ac432013-01-06 13:34:49 +00006870 pr_warn("could not create debugfs entry, continuing\n");
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006871
6872 ret = pci_register_driver(&cxgb4_driver);
Anish Bhatt29aaee62014-08-20 13:44:06 -07006873 if (ret < 0)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006874 debugfs_remove(cxgb4_debugfs_root);
Vipul Pandya01bcca62013-07-04 16:10:46 +05306875
Anish Bhatt1bb60372014-10-14 20:07:22 -07006876#if IS_ENABLED(CONFIG_IPV6)
Vipul Pandya01bcca62013-07-04 16:10:46 +05306877 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
Anish Bhatt1bb60372014-10-14 20:07:22 -07006878#endif
Vipul Pandya01bcca62013-07-04 16:10:46 +05306879
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006880 return ret;
6881}
6882
6883static void __exit cxgb4_cleanup_module(void)
6884{
Anish Bhatt1bb60372014-10-14 20:07:22 -07006885#if IS_ENABLED(CONFIG_IPV6)
Vipul Pandya01bcca62013-07-04 16:10:46 +05306886 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
Anish Bhatt1bb60372014-10-14 20:07:22 -07006887#endif
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006888 pci_unregister_driver(&cxgb4_driver);
6889 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006890}
6891
6892module_init(cxgb4_init_module);
6893module_exit(cxgb4_cleanup_module);