/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "l2t.h"

#include <../drivers/net/bonding/bonding.h>

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
        /*
         * Physical Function provisioning constants.
         */
        PFRES_NVI = 4,                  /* # of Virtual Interfaces */
        PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
        PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr */
        PFRES_NEQ = 256,                /* # of egress queues */
        PFRES_NIQ = 0,                  /* # of ingress queues */
        PFRES_TC = 0,                   /* PCI-E traffic class */
        PFRES_NEXACTF = 128,            /* # of exact MPS filters */

        PFRES_R_CAPS = FW_CMD_CAP_PF,
        PFRES_WX_CAPS = FW_CMD_CAP_PF,

#ifdef CONFIG_PCI_IOV
        /*
         * Virtual Function provisioning constants.  We need two extra Ingress
         * Queues with Interrupt capability to serve as the VF's Firmware
         * Event Queue and Forwarded Interrupt Queue (when using MSI mode);
         * neither will have Free Lists associated with it.  For each
         * Ethernet/Control Egress Queue and for each Free List, we need an
         * Egress Context.
         */
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */

        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
        VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */

        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
#endif
};

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
                                  unsigned int pf, unsigned int vf)
{
        unsigned int portn, portvec;

        /*
         * Give PFs access to all of the ports.
         */
        if (vf == 0)
                return FW_PFVF_CMD_PMASK_MASK;

        /*
         * For VFs, we'll assign them access to the ports based purely on the
         * PF.  We assign active ports in order, wrapping around if there are
         * fewer active ports than PFs: e.g. active port[pf % nports].
         * Unfortunately the adapter's port_info structs haven't been
         * initialized yet, so we have to compute this.
         */
        if (adapter->params.nports == 0)
                return 0;

        portn = pf % adapter->params.nports;
        portvec = adapter->params.portvec;
        for (;;) {
                /*
                 * Isolate the lowest set bit in the port vector.  If we're at
                 * the port number that we want, return that as the pmask;
                 * otherwise mask that bit out of the port vector and
                 * decrement our port number ...
                 */
                unsigned int pmask = portvec ^ (portvec & (portvec-1));
                if (portn == 0)
                        return pmask;
                portn--;
                portvec &= ~pmask;
        }
        /*NOTREACHED*/
}

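/*
 * Worked example for pfvfres_pmask() above (illustrative, not part of
 * the driver): with portvec = 0xb (ports 0, 1 and 3 active, so
 * nports = 3), "portvec ^ (portvec & (portvec - 1))" peels off the
 * lowest set bit on each iteration.  PF0 thus gets pmask 0x1, PF1 gets
 * 0x2, PF2 gets 0x8, and PF3 wraps back to pmask 0x1 via "pf % nports".
 */
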
enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES = 128,
        MIN_FL_ENTRIES = 16
};

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
        /* Administrative fields for filter.
         */
        u32 valid:1;            /* filter allocated and valid */
        u32 locked:1;           /* filter is administratively locked */

        u32 pending:1;          /* filter action is pending firmware reply */
        u32 smtidx:8;           /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        /* The filter itself.  Most of this is a straight copy of information
         * provided by the extended ioctl().  Some fields are translated to
         * internal forms -- for instance the Ingress Queue ID passed in from
         * the ioctl() is translated into the Absolute Ingress Queue ID.
         */
        struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static const struct pci_device_id cxgb4_pci_tbl[] = {
        CH_DEVICE(0xa000, 0),  /* PE10K */
        CH_DEVICE(0x4001, -1),
        CH_DEVICE(0x4002, -1),
        CH_DEVICE(0x4003, -1),
        CH_DEVICE(0x4004, -1),
        CH_DEVICE(0x4005, -1),
        CH_DEVICE(0x4006, -1),
        CH_DEVICE(0x4007, -1),
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
        CH_DEVICE(0x400d, -1),
        CH_DEVICE(0x400e, -1),
        CH_DEVICE(0x4080, -1),
        CH_DEVICE(0x4081, -1),
        CH_DEVICE(0x4082, -1),
        CH_DEVICE(0x4083, -1),
        CH_DEVICE(0x4084, -1),
        CH_DEVICE(0x4085, -1),
        CH_DEVICE(0x4086, -1),
        CH_DEVICE(0x4087, -1),
        CH_DEVICE(0x4088, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
        CH_DEVICE(0x4404, 4),
        CH_DEVICE(0x4405, 4),
        CH_DEVICE(0x4406, 4),
        CH_DEVICE(0x4407, 4),
        CH_DEVICE(0x4408, 4),
        CH_DEVICE(0x4409, 4),
        CH_DEVICE(0x440a, 4),
        CH_DEVICE(0x440d, 4),
        CH_DEVICE(0x440e, 4),
        CH_DEVICE(0x4480, 4),
        CH_DEVICE(0x4481, 4),
        CH_DEVICE(0x4482, 4),
        CH_DEVICE(0x4483, 4),
        CH_DEVICE(0x4484, 4),
        CH_DEVICE(0x4485, 4),
        CH_DEVICE(0x4486, 4),
        CH_DEVICE(0x4487, 4),
        CH_DEVICE(0x4488, 4),
        CH_DEVICE(0x5001, 4),
        CH_DEVICE(0x5002, 4),
        CH_DEVICE(0x5003, 4),
        CH_DEVICE(0x5004, 4),
        CH_DEVICE(0x5005, 4),
        CH_DEVICE(0x5006, 4),
        CH_DEVICE(0x5007, 4),
        CH_DEVICE(0x5008, 4),
        CH_DEVICE(0x5009, 4),
        CH_DEVICE(0x500A, 4),
        CH_DEVICE(0x500B, 4),
        CH_DEVICE(0x500C, 4),
        CH_DEVICE(0x500D, 4),
        CH_DEVICE(0x500E, 4),
        CH_DEVICE(0x500F, 4),
        CH_DEVICE(0x5010, 4),
        CH_DEVICE(0x5011, 4),
        CH_DEVICE(0x5012, 4),
        CH_DEVICE(0x5013, 4),
        CH_DEVICE(0x5014, 4),
        CH_DEVICE(0x5015, 4),
        CH_DEVICE(0x5080, 4),
        CH_DEVICE(0x5081, 4),
        CH_DEVICE(0x5082, 4),
        CH_DEVICE(0x5083, 4),
        CH_DEVICE(0x5084, 4),
        CH_DEVICE(0x5085, 4),
        CH_DEVICE(0x5086, 4),
        CH_DEVICE(0x5087, 4),
        CH_DEVICE(0x5088, 4),
        CH_DEVICE(0x5401, 4),
        CH_DEVICE(0x5402, 4),
        CH_DEVICE(0x5403, 4),
        CH_DEVICE(0x5404, 4),
        CH_DEVICE(0x5405, 4),
        CH_DEVICE(0x5406, 4),
        CH_DEVICE(0x5407, 4),
        CH_DEVICE(0x5408, 4),
        CH_DEVICE(0x5409, 4),
        CH_DEVICE(0x540A, 4),
        CH_DEVICE(0x540B, 4),
        CH_DEVICE(0x540C, 4),
        CH_DEVICE(0x540D, 4),
        CH_DEVICE(0x540E, 4),
        CH_DEVICE(0x540F, 4),
        CH_DEVICE(0x5410, 4),
        CH_DEVICE(0x5411, 4),
        CH_DEVICE(0x5412, 4),
        CH_DEVICE(0x5413, 4),
        CH_DEVICE(0x5414, 4),
        CH_DEVICE(0x5415, 4),
        CH_DEVICE(0x5480, 4),
        CH_DEVICE(0x5481, 4),
        CH_DEVICE(0x5482, 4),
        CH_DEVICE(0x5483, 4),
        CH_DEVICE(0x5484, 4),
        CH_DEVICE(0x5485, 4),
        CH_DEVICE(0x5486, 4),
        CH_DEVICE(0x5487, 4),
        CH_DEVICE(0x5488, 4),
        { 0, }
};

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");

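/*
 * Example usage of the interrupt-related parameters above (illustrative
 * only; the values shown are arbitrary):
 *
 *      modprobe cxgb4 msi=1 intr_holdoff=5,10,20,50,100 intr_cnt=4,8,16
 *
 * restricts the driver to MSI/INTx and explicitly programs the five
 * hold-off timers and three packet-count thresholds.
 */
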
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures, which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

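/*
 * Worked arithmetic (illustrative): an Ethernet header is 14 bytes, so
 * with rx_dma_offset = 2 the IP header of a received frame begins at
 * byte 14 + 2 = 16 of the DMA buffer, a 4-byte boundary.  With an
 * offset of 0 it would begin at byte 14, which is not 4-byte aligned.
 */
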
static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/* TX Queue select used to determine what algorithm to use for selecting the
 * TX queue.  Select between the kernel-provided function (select_queue=0) or
 * the driver's cxgb_select_queue() function (select_queue=1).
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
                 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

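/*
 * Example (illustrative): loading with "modprobe cxgb4 select_queue=1"
 * makes cxgb_select_queue() below steer a TX skb to the RX queue
 * recorded for its flow, falling back to the current CPU's index when
 * no RX queue has been recorded.
 */
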
/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits, which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
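/*
 * (Worked check of the default set above, for illustration:
 * 1 + 3 + 8 + 17 + 3 + 1 = 33 bits, which fits the 36-bit budget.)
 */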
enum {
        TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
        TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
        TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case 10000:
                        s = "10Gbps";
                        break;
                case 1000:
                        s = "1000Mbps";
                        break;
                case 100:
                        s = "100Mbps";
                        break;
                case 40000:
                        s = "40Gbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
        int i;

        /* We use a simple mapping of Port TX Queue Index to DCB
         * Priority when we're enabling DCB.
         */
        for (i = 0; i < pi->nqsets; i++, txq++) {
                u32 name, value;
                int err;

                name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
                        FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
                value = enable ? i : 0xffffffff;

                /* Since we can be called while atomic (from "interrupt
                 * level") we need to issue the Set Parameters Command
                 * without sleeping (timeout < 0).
                 */
                err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
                                            &name, &value);

                if (err)
                        dev_err(adap->pdev_dev,
                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
                                enable ? "set" : "unset", pi->port_id, i, -err);
                else
                        txq->dcb_prio = value;
        }
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else {
#ifdef CONFIG_CHELSIO_T4_DCB
                        cxgb4_dcb_state_init(dev);
                        dcb_tx_queue_prio_enable(dev, false);
#endif /* CONFIG_CHELSIO_T4_DCB */
                        netif_carrier_off(dev);
                }

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}

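/*
 * Illustrative walk-through of the batching in set_addr_filters()
 * above: with, say, 10 secondary unicast addresses, the first
 * t4_alloc_mac_filt() call programs 7 addresses with "free" set
 * (replacing any previously programmed filters) and the second call
 * programs the remaining 3 with "free" clear, preserving the first
 * batch.
 */
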
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}

/**
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /*
         * We do not set address filters and promiscuity here; the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0) {
                local_bh_disable();
                ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
                                          true, CXGB4_DCB_ENABLED);
                local_bh_enable();
        }

        return ret;
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
        struct port_info *pi = netdev_priv(dev);

        if (!pi->dcb.enabled)
                return 0;

        return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
                (pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
        return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
        int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
        struct net_device *dev = adap->port[port];
        int old_dcb_enabled = cxgb4_dcb_enabled(dev);
        int new_dcb_enabled;

        cxgb4_dcb_handle_fw_update(adap, pcmd);
        new_dcb_enabled = cxgb4_dcb_enabled(dev);

        /* If the DCB has become enabled or disabled on the port then we're
         * going to need to set up/tear down DCB Priority parameters for the
         * TX Queues associated with the port.
         */
        if (new_dcb_enabled != old_dcb_enabled)
                dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
        /* If the new or old filter has loopback rewriting rules then we'll
         * need to free any existing Layer Two Table (L2T) entries of the old
         * filter rule.  The firmware will handle freeing up any Source MAC
         * Table (SMT) entries used for rewriting Source MAC Addresses in
         * loopback rules.
         */
        if (f->l2t)
                cxgb4_l2t_release(f->l2t);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags, l2t pointer, etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        unsigned int idx = GET_TID(rpl);
        unsigned int nidx = idx - adap->tids.ftid_base;
        unsigned int ret;
        struct filter_entry *f;

        if (idx >= adap->tids.ftid_base && nidx <
           (adap->tids.nftids + adap->tids.nsftids)) {
                idx = nidx;
                ret = GET_TCB_COOKIE(rpl->cookie);
                f = &adap->tids.ftid_tab[idx];

                if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /* Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
                        dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
                                idx);
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        /* Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
                                idx, ret);
                        clear_filter(adap, f);
                }
        }
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
           ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
                const struct fw_port_cmd *pcmd = (const void *)p->data;
                unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
                unsigned int action =
                        FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_GET_PORT_INFO) {
                        int port = FW_PORT_CMD_PORTID_GET(
                                        be32_to_cpu(pcmd->op_to_portid));
                        struct net_device *dev = q->adap->port[port];
                        int state_input = ((pcmd->u.info.dcbxdis_pkd &
                                            FW_PORT_CMD_DCBXDIS)
                                           ? CXGB4_DCB_INPUT_FW_DISABLED
                                           : CXGB4_DCB_INPUT_FW_ENABLED);

                        cxgb4_dcb_state_fsm(dev, state_input);
                }

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_L2_DCB_CFG)
                        dcb_rpl(q->adap, pcmd);
                else
#endif
                        if (p->type == 0)
                                t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
out:
        return 0;
}

/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD; we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
         */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
        if (v & PFSW) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);

        for_each_rdmaciq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
                         adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
        int msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmaciq(s, rdmaciqqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmaciq[rdmaciqqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--rdmaciqqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmaciq[rdmaciqqidx].rspq);
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
        for_each_rdmaciq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}

/**
 *      write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;

        err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                err = write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}

/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_rdmaciq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmaciq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_ciq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, is_t4(adap->params.chip) ?
                                MPS_TRC_RSS_CONTROL :
                                MPS_T5_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = t4_l2t_alloc_switching(adapter->l2t);
                if (f->l2t == NULL)
                        return -EAGAIN;
                if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
                                        f->fs.eport, f->fs.dmac)) {
                        cxgb4_l2t_release(f->l2t);
                        f->l2t = NULL;
                        return -ENOMEM;
                }
        }

        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
        memset(fwr, 0, sizeof(*fwr));

        /* It would be nice to put most of the following in t4_hw.c but most
         * of the work is translating the cxgbtool ch_filter_specification
         * into the Work Request and the definition of that structure is
         * currently in cxgbtool.h which isn't appropriate to pull into the
         * common code.  We may eventually try to come up with a more neutral
         * filter specification structure but for now it's easiest to simply
         * put this fairly direct code in line ...
         */
        fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
        fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
        fwr->tid_to_iq =
                htonl(V_FW_FILTER_WR_TID(ftid) |
                      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                      V_FW_FILTER_WR_NOREPLY(0) |
                      V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                                             f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                                            f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                      V_FW_FILTER_WR_PRIO(f->fs.prio) |
                      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htons(f->fs.val.ethtype);
        fwr->ethtypem = htons(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
                 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                htons(V_FW_FILTER_WR_RX_CHAN(0) |
                      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htons(f->fs.val.ivlan);
        fwr->ivlanm = htons(f->fs.mask.ivlan);
        fwr->ovlan = htons(f->fs.val.ovlan);
        fwr->ovlanm = htons(f->fs.mask.ovlan);
        memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = htons(f->fs.val.lport);
        fwr->lpm = htons(f->fs.mask.lport);
        fwr->fp = htons(f->fs.val.fport);
        fwr->fpm = htons(f->fs.mask.fport);
        if (f->fs.newsmac)
                memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
        t4_ofld_send(adapter, skb);
        return 0;
}

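/*
 * Filter lifecycle sketch (illustrative): set_filter_wr() and
 * del_filter_wr() mark an entry "pending" and ship a Work Request;
 * the firmware's CPL_SET_TCB_RPL reply is routed to filter_rpl()
 * above, which clears "pending" and either marks the entry valid or
 * clears it on error.
 */
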
1417/* Delete the filter at a specified index.
1418 */
1419static int del_filter_wr(struct adapter *adapter, int fidx)
1420{
1421 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1422 struct sk_buff *skb;
1423 struct fw_filter_wr *fwr;
1424 unsigned int len, ftid;
1425
1426 len = sizeof(*fwr);
1427 ftid = adapter->tids.ftid_base + fidx;
1428
1429 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1430 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1431 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1432
1433 /* Mark the filter as "pending" and ship off the Filter Work Request.
1434 * When we get the Work Request Reply we'll clear the pending status.
1435 */
1436 f->pending = 1;
1437 t4_mgmt_tx(adapter, skb);
1438 return 0;
1439}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev)) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}
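
/* For illustration only: a hedged sketch of how an ndo_select_queue
 * implementation such as cxgb_select_queue() above is typically wired up
 * through struct net_device_ops.  The surrounding field values are
 * assumptions for the example, not a copy of this driver's ops table.
 */
#if 0
static const struct net_device_ops example_netdev_ops = {
	.ndo_open	  = example_open,	/* hypothetical names */
	.ndo_stop	  = example_close,
	.ndo_start_xmit	  = example_xmit,
	.ndo_select_queue = cxgb_select_queue,	/* the routine above */
};
#endif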

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK ",
	"TxFramesOK ",
	"TxBroadcastFrames ",
	"TxMulticastFrames ",
	"TxUnicastFrames ",
	"TxErrorFrames ",

	"TxFrames64 ",
	"TxFrames65To127 ",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",

	"TxFramesDropped ",
	"TxPauseFrames ",
	"TxPPP0Frames ",
	"TxPPP1Frames ",
	"TxPPP2Frames ",
	"TxPPP3Frames ",
	"TxPPP4Frames ",
	"TxPPP5Frames ",
	"TxPPP6Frames ",
	"TxPPP7Frames ",

	"RxOctetsOK ",
	"RxFramesOK ",
	"RxBroadcastFrames ",
	"RxMulticastFrames ",
	"RxUnicastFrames ",

	"RxFramesTooLong ",
	"RxJabberErrors ",
	"RxFCSErrors ",
	"RxLengthErrors ",
	"RxSymbolErrors ",
	"RxRuntFrames ",

	"RxFrames64 ",
	"RxFrames65To127 ",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",

	"RxPauseFrames ",
	"RxPPP0Frames ",
	"RxPPP1Frames ",
	"RxPPP2Frames ",
	"RxPPP3Frames ",
	"RxPPP4Frames ",
	"RxPPP5Frames ",
	"RxPPP6Frames ",
	"RxPPP7Frames ",

	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc ",
	"RxBG1FramesTrunc ",
	"RxBG2FramesTrunc ",
	"RxBG3FramesTrunc ",

	"TSO ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"VLANextractions ",
	"VLANinsertions ",
	"GROpackets ",
	"GROmerged ",
	"WriteCoalSuccess ",
	"WriteCoalFail ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	if (is_t4(adap->params.chip))
		return T4_REGMAP_SIZE;
	else
		return T5_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * Port stats maintained per queue of the port. They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 val1, val2;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	if (!is_t4(adapter->params.chip)) {
		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
		*data = val1 - val2;
		data++;
		*data = val2;
		data++;
	} else {
		memset(data, 0, 2 * sizeof(u64));
		data += 2;	/* advance past the zeroed T5-only counters */
	}
}

/*
 * Return a version number to identify the type of adapter. The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->params.chip) |
		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
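
/* For illustration only: a hedged sketch of unpacking the version word
 * built by mk_adap_vers() above, following the bit layout documented in
 * the preceding comment.  This helper is not part of the driver.
 */
#if 0
static void example_decode_adap_vers(unsigned int vers)
{
	unsigned int chip_ver = vers & 0x3ff;		/* bits 0..9 */
	unsigned int chip_rev = (vers >> 10) & 0x3f;	/* bits 10..15 */
	unsigned int dump_ver = (vers >> 16) & 0xff;	/* bits 16..23 */

	pr_info("chip version %u, revision %u, regdump version %u\n",
		chip_ver, chip_rev, dump_ver);
}
#endif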

static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int t4_reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11110,
		0x11118, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};

	static const unsigned int t5_reg_ranges[] = {
		0x1008, 0x1148,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1280, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x3028,
		0x3060, 0x30d8,
		0x30e0, 0x30fc,
		0x3140, 0x357c,
		0x35a8, 0x35cc,
		0x35ec, 0x35ec,
		0x3600, 0x5624,
		0x56cc, 0x575c,
		0x580c, 0x5814,
		0x5890, 0x58bc,
		0x5940, 0x59dc,
		0x59fc, 0x5a18,
		0x5a60, 0x5a9c,
		0x5b9c, 0x5bfc,
		0x6000, 0x6040,
		0x6058, 0x614c,
		0x7700, 0x7798,
		0x77c0, 0x78fc,
		0x7b00, 0x7c54,
		0x7d00, 0x7efc,
		0x8dc0, 0x8de0,
		0x8df8, 0x8e84,
		0x8ea0, 0x8f84,
		0x8fc0, 0x90f8,
		0x9400, 0x9470,
		0x9600, 0x96f4,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0xa020,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0x11088,
		0x1109c, 0x11110,
		0x11118, 0x1117c,
		0x11190, 0x11204,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x19290,
		0x193f8, 0x19474,
		0x19490, 0x194cc,
		0x194f0, 0x194f8,
		0x19c00, 0x19c60,
		0x19c94, 0x19e10,
		0x19e50, 0x19f34,
		0x19f40, 0x19f50,
		0x19f90, 0x19fe4,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e008, 0x1e00c,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e290,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e408, 0x1e40c,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e690,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e808, 0x1e80c,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea90,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec08, 0x1ec0c,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee90,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f008, 0x1f00c,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f290,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f408, 0x1f40c,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f690,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f808, 0x1f80c,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa90,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc08, 0x1fc0c,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe90,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x30000, 0x30030,
		0x30100, 0x30144,
		0x30190, 0x301d0,
		0x30200, 0x30318,
		0x30400, 0x3052c,
		0x30540, 0x3061c,
		0x30800, 0x30834,
		0x308c0, 0x30908,
		0x30910, 0x309ac,
		0x30a00, 0x30a04,
		0x30a0c, 0x30a2c,
		0x30a44, 0x30a50,
		0x30a74, 0x30c24,
		0x30d08, 0x30d14,
		0x30d1c, 0x30d20,
		0x30d3c, 0x30d50,
		0x31200, 0x3120c,
		0x31220, 0x31220,
		0x31240, 0x31240,
		0x31600, 0x31600,
		0x31608, 0x3160c,
		0x31a00, 0x31a1c,
		0x31e04, 0x31e20,
		0x31e38, 0x31e3c,
		0x31e80, 0x31e80,
		0x31e88, 0x31ea8,
		0x31eb0, 0x31eb4,
		0x31ec8, 0x31ed4,
		0x31fb8, 0x32004,
		0x32208, 0x3223c,
		0x32600, 0x32630,
		0x32a00, 0x32abc,
		0x32b00, 0x32b70,
		0x33000, 0x33048,
		0x33060, 0x3309c,
		0x330f0, 0x33148,
		0x33160, 0x3319c,
		0x331f0, 0x332e4,
		0x332f8, 0x333e4,
		0x333f8, 0x33448,
		0x33460, 0x3349c,
		0x334f0, 0x33548,
		0x33560, 0x3359c,
		0x335f0, 0x336e4,
		0x336f8, 0x337e4,
		0x337f8, 0x337fc,
		0x33814, 0x33814,
		0x3382c, 0x3382c,
		0x33880, 0x3388c,
		0x338e8, 0x338ec,
		0x33900, 0x33948,
		0x33960, 0x3399c,
		0x339f0, 0x33ae4,
		0x33af8, 0x33b10,
		0x33b28, 0x33b28,
		0x33b3c, 0x33b50,
		0x33bf0, 0x33c10,
		0x33c28, 0x33c28,
		0x33c3c, 0x33c50,
		0x33cf0, 0x33cfc,
		0x34000, 0x34030,
		0x34100, 0x34144,
		0x34190, 0x341d0,
		0x34200, 0x34318,
		0x34400, 0x3452c,
		0x34540, 0x3461c,
		0x34800, 0x34834,
		0x348c0, 0x34908,
		0x34910, 0x349ac,
		0x34a00, 0x34a04,
		0x34a0c, 0x34a2c,
		0x34a44, 0x34a50,
		0x34a74, 0x34c24,
		0x34d08, 0x34d14,
		0x34d1c, 0x34d20,
		0x34d3c, 0x34d50,
		0x35200, 0x3520c,
		0x35220, 0x35220,
		0x35240, 0x35240,
		0x35600, 0x35600,
		0x35608, 0x3560c,
		0x35a00, 0x35a1c,
		0x35e04, 0x35e20,
		0x35e38, 0x35e3c,
		0x35e80, 0x35e80,
		0x35e88, 0x35ea8,
		0x35eb0, 0x35eb4,
		0x35ec8, 0x35ed4,
		0x35fb8, 0x36004,
		0x36208, 0x3623c,
		0x36600, 0x36630,
		0x36a00, 0x36abc,
		0x36b00, 0x36b70,
		0x37000, 0x37048,
		0x37060, 0x3709c,
		0x370f0, 0x37148,
		0x37160, 0x3719c,
		0x371f0, 0x372e4,
		0x372f8, 0x373e4,
		0x373f8, 0x37448,
		0x37460, 0x3749c,
		0x374f0, 0x37548,
		0x37560, 0x3759c,
		0x375f0, 0x376e4,
		0x376f8, 0x377e4,
		0x377f8, 0x377fc,
		0x37814, 0x37814,
		0x3782c, 0x3782c,
		0x37880, 0x3788c,
		0x378e8, 0x378ec,
		0x37900, 0x37948,
		0x37960, 0x3799c,
		0x379f0, 0x37ae4,
		0x37af8, 0x37b10,
		0x37b28, 0x37b28,
		0x37b3c, 0x37b50,
		0x37bf0, 0x37c10,
		0x37c28, 0x37c28,
		0x37c3c, 0x37c50,
		0x37cf0, 0x37cfc,
		0x38000, 0x38030,
		0x38100, 0x38144,
		0x38190, 0x381d0,
		0x38200, 0x38318,
		0x38400, 0x3852c,
		0x38540, 0x3861c,
		0x38800, 0x38834,
		0x388c0, 0x38908,
		0x38910, 0x389ac,
		0x38a00, 0x38a04,
		0x38a0c, 0x38a2c,
		0x38a44, 0x38a50,
		0x38a74, 0x38c24,
		0x38d08, 0x38d14,
		0x38d1c, 0x38d20,
		0x38d3c, 0x38d50,
		0x39200, 0x3920c,
		0x39220, 0x39220,
		0x39240, 0x39240,
		0x39600, 0x39600,
		0x39608, 0x3960c,
		0x39a00, 0x39a1c,
		0x39e04, 0x39e20,
		0x39e38, 0x39e3c,
		0x39e80, 0x39e80,
		0x39e88, 0x39ea8,
		0x39eb0, 0x39eb4,
		0x39ec8, 0x39ed4,
		0x39fb8, 0x3a004,
		0x3a208, 0x3a23c,
		0x3a600, 0x3a630,
		0x3aa00, 0x3aabc,
		0x3ab00, 0x3ab70,
		0x3b000, 0x3b048,
		0x3b060, 0x3b09c,
		0x3b0f0, 0x3b148,
		0x3b160, 0x3b19c,
		0x3b1f0, 0x3b2e4,
		0x3b2f8, 0x3b3e4,
		0x3b3f8, 0x3b448,
		0x3b460, 0x3b49c,
		0x3b4f0, 0x3b548,
		0x3b560, 0x3b59c,
		0x3b5f0, 0x3b6e4,
		0x3b6f8, 0x3b7e4,
		0x3b7f8, 0x3b7fc,
		0x3b814, 0x3b814,
		0x3b82c, 0x3b82c,
		0x3b880, 0x3b88c,
		0x3b8e8, 0x3b8ec,
		0x3b900, 0x3b948,
		0x3b960, 0x3b99c,
		0x3b9f0, 0x3bae4,
		0x3baf8, 0x3bb10,
		0x3bb28, 0x3bb28,
		0x3bb3c, 0x3bb50,
		0x3bbf0, 0x3bc10,
		0x3bc28, 0x3bc28,
		0x3bc3c, 0x3bc50,
		0x3bcf0, 0x3bcfc,
		0x3c000, 0x3c030,
		0x3c100, 0x3c144,
		0x3c190, 0x3c1d0,
		0x3c200, 0x3c318,
		0x3c400, 0x3c52c,
		0x3c540, 0x3c61c,
		0x3c800, 0x3c834,
		0x3c8c0, 0x3c908,
		0x3c910, 0x3c9ac,
		0x3ca00, 0x3ca04,
		0x3ca0c, 0x3ca2c,
		0x3ca44, 0x3ca50,
		0x3ca74, 0x3cc24,
		0x3cd08, 0x3cd14,
		0x3cd1c, 0x3cd20,
		0x3cd3c, 0x3cd50,
		0x3d200, 0x3d20c,
		0x3d220, 0x3d220,
		0x3d240, 0x3d240,
		0x3d600, 0x3d600,
		0x3d608, 0x3d60c,
		0x3da00, 0x3da1c,
		0x3de04, 0x3de20,
		0x3de38, 0x3de3c,
		0x3de80, 0x3de80,
		0x3de88, 0x3dea8,
		0x3deb0, 0x3deb4,
		0x3dec8, 0x3ded4,
		0x3dfb8, 0x3e004,
		0x3e208, 0x3e23c,
		0x3e600, 0x3e630,
		0x3ea00, 0x3eabc,
		0x3eb00, 0x3eb70,
		0x3f000, 0x3f048,
		0x3f060, 0x3f09c,
		0x3f0f0, 0x3f148,
		0x3f160, 0x3f19c,
		0x3f1f0, 0x3f2e4,
		0x3f2f8, 0x3f3e4,
		0x3f3f8, 0x3f448,
		0x3f460, 0x3f49c,
		0x3f4f0, 0x3f548,
		0x3f560, 0x3f59c,
		0x3f5f0, 0x3f6e4,
		0x3f6f8, 0x3f7e4,
		0x3f7f8, 0x3f7fc,
		0x3f814, 0x3f814,
		0x3f82c, 0x3f82c,
		0x3f880, 0x3f88c,
		0x3f8e8, 0x3f8ec,
		0x3f900, 0x3f948,
		0x3f960, 0x3f99c,
		0x3f9f0, 0x3fae4,
		0x3faf8, 0x3fb10,
		0x3fb28, 0x3fb28,
		0x3fb3c, 0x3fb50,
		0x3fbf0, 0x3fc10,
		0x3fc28, 0x3fc28,
		0x3fc3c, 0x3fc50,
		0x3fcf0, 0x3fcfc,
		0x40000, 0x4000c,
		0x40040, 0x40068,
		0x40080, 0x40144,
		0x40180, 0x4018c,
		0x40200, 0x40298,
		0x402ac, 0x4033c,
		0x403f8, 0x403fc,
		0x41304, 0x413c4,
		0x41400, 0x4141c,
		0x41480, 0x414d0,
		0x44000, 0x44078,
		0x440c0, 0x44278,
		0x442c0, 0x44478,
		0x444c0, 0x44678,
		0x446c0, 0x44878,
		0x448c0, 0x449fc,
		0x45000, 0x45068,
		0x45080, 0x45084,
		0x450a0, 0x450b0,
		0x45200, 0x45268,
		0x45280, 0x45284,
		0x452a0, 0x452b0,
		0x460c0, 0x460e4,
		0x47000, 0x4708c,
		0x47200, 0x47250,
		0x47400, 0x47420,
		0x47600, 0x47618,
		0x47800, 0x47814,
		0x48000, 0x4800c,
		0x48040, 0x48068,
		0x48080, 0x48144,
		0x48180, 0x4818c,
		0x48200, 0x48298,
		0x482ac, 0x4833c,
		0x483f8, 0x483fc,
		0x49304, 0x493c4,
		0x49400, 0x4941c,
		0x49480, 0x494d0,
		0x4c000, 0x4c078,
		0x4c0c0, 0x4c278,
		0x4c2c0, 0x4c478,
		0x4c4c0, 0x4c678,
		0x4c6c0, 0x4c878,
		0x4c8c0, 0x4c9fc,
		0x4d000, 0x4d068,
		0x4d080, 0x4d084,
		0x4d0a0, 0x4d0b0,
		0x4d200, 0x4d268,
		0x4d280, 0x4d284,
		0x4d2a0, 0x4d2b0,
		0x4e0c0, 0x4e0e4,
		0x4f000, 0x4f08c,
		0x4f200, 0x4f250,
		0x4f400, 0x4f420,
		0x4f600, 0x4f618,
		0x4f800, 0x4f814,
		0x50000, 0x500cc,
		0x50400, 0x50400,
		0x50800, 0x508cc,
		0x50c00, 0x50c00,
		0x51000, 0x5101c,
		0x51300, 0x51308,
	};

	int i;
	struct adapter *ap = netdev2adap(dev);
	static const unsigned int *reg_ranges;
	int arr_size = 0, buf_size = 0;

	if (is_t4(ap->params.chip)) {
		reg_ranges = &t4_reg_ranges[0];
		arr_size = ARRAY_SIZE(t4_reg_ranges);
		buf_size = T4_REGMAP_SIZE;
	} else {
		reg_ranges = &t5_reg_ranges[0];
		arr_size = ARRAY_SIZE(t5_reg_ranges);
		buf_size = T5_REGMAP_SIZE;
	}

	regs->version = mk_adap_vers(ap);

	memset(buf, 0, buf_size);
	for (i = 0; i < arr_size; i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
	return 0;
}

static int identify_port(struct net_device *dev,
			 enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct adapter *adap = netdev2adap(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}

static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
	unsigned int v = 0;

	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
	    type == FW_PORT_TYPE_BT_XAUI) {
		v |= SUPPORTED_TP;
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	else if (type == FW_PORT_TYPE_BP_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
	else if (type == FW_PORT_TYPE_BP4_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
		     SUPPORTED_10000baseKX4_Full;
	else if (type == FW_PORT_TYPE_FIBER_XFI ||
		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
		v |= SUPPORTED_FIBRE;
	else if (type == FW_PORT_TYPE_BP40_BA)
		v |= SUPPORTED_40000baseSR4_Full;

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	return v;
}

static unsigned int to_fw_linkcaps(unsigned int caps)
{
	unsigned int v = 0;

	if (caps & ADVERTISED_100baseT_Full)
		v |= FW_PORT_CAP_SPEED_100M;
	if (caps & ADVERTISED_1000baseT_Full)
		v |= FW_PORT_CAP_SPEED_1G;
	if (caps & ADVERTISED_10000baseT_Full)
		v |= FW_PORT_CAP_SPEED_10G;
	if (caps & ADVERTISED_40000baseSR4_Full)
		v |= FW_PORT_CAP_SPEED_40G;
	return v;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP ||
		 p->port_type == FW_PORT_TYPE_QSFP_10G ||
		 p->port_type == FW_PORT_TYPE_QSFP) {
		if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
		    p->mod_type == FW_PORT_MOD_TYPE_SR ||
		    p->mod_type == FW_PORT_MOD_TYPE_ER ||
		    p->mod_type == FW_PORT_MOD_TYPE_LRM)
			cmd->port = PORT_FIBRE;
		else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
			 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_OTHER;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;  /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static unsigned int speed_to_caps(int speed)
{
	if (speed == 100)
		return FW_PORT_CAP_SPEED_100M;
	if (speed == 1000)
		return FW_PORT_CAP_SPEED_1G;
	if (speed == 10000)
		return FW_PORT_CAP_SPEED_10G;
	if (speed == 40000)
		return FW_PORT_CAP_SPEED_40G;
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed. See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(speed);

		if (!(lc->supported & cap) ||
		    (speed == 1000) ||
		    (speed == 10000) ||
		    (speed == 40000))
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & FW_PORT_CAP_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
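
/* For illustration only: both helpers above do a linear nearest-value
 * search.  With hypothetical hold-off timer values {5, 10, 20, 50, 100,
 * 200} us, a request for 15 us is equidistant from 10 us and 20 us; since
 * only a strictly smaller delta replaces the current match, the earlier
 * index (10 us) wins.  The values here are assumptions for the example,
 * not necessarily the adapter's programmed defaults.
 */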

/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
			       const struct sge_rspq *q)
{
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}

/**
 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
 * @q: the Rx queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an Rx queue's interrupt hold-off time and packet count.  At least
 * one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rspq_intr_params(struct sge_rspq *q,
				unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
					    &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
	return 0;
}
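
/* For illustration only: a hedged sketch of how the intr_params encoding
 * produced above round-trips with qtimer_val().  The timer index lives in
 * the upper bits (QINTR_TIMER_IDX) and bit 0 is the packet-count enable
 * (QINTR_CNT_EN), which is why qtimer_val() shifts right by one.  This
 * helper is not part of the driver.
 */
#if 0
static int example_rspq_holdoff(struct sge_rspq *q)
{
	/* Ask for a ~5us hold-off plus an 8-packet counter; both values
	 * are rounded to the nearest entries in the adapter's timer and
	 * counter tables by set_rspq_intr_params().
	 */
	return set_rspq_intr_params(q, 5, 8);
}
#endif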

/**
 * set_rx_intr_params - set a net device's Rx interrupt hold-off parameters
 * @dev: the network device
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Set the RX interrupt hold-off parameters for a network device.
 */
static int set_rx_intr_params(struct net_device *dev,
			      unsigned int us, unsigned int cnt)
{
	int i, err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	for (i = 0; i < pi->nqsets; i++, q++) {
		err = set_rspq_intr_params(&q->rspq, us, cnt);
		if (err)
			return err;
	}
	return 0;
}

static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
{
	int i;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	for (i = 0; i < pi->nqsets; i++, q++)
		q->rspq.adaptive_rx = adaptive_rx;

	return 0;
}

static int get_adaptive_rx_setting(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	return q->rspq.adaptive_rx;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
	return set_rx_intr_params(dev, c->rx_coalesce_usecs,
				  c->rx_max_coalesced_frames);
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

	c->rx_coalesce_usecs = qtimer_val(adap, rq);
	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
		adap->sge.counter_val[rq->pktcnt_idx] : 0;
	c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
	return 0;
}
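
/* For illustration only: the two handlers above back the ethtool coalesce
 * ioctls, e.g. "ethtool -C ethX rx-usecs 5 rx-frames 8 adaptive-rx on"
 * from userspace ends up here.  A hedged in-kernel equivalent might look
 * like the sketch below; it is not part of the driver.
 */
#if 0
static int example_tune_coalesce(struct net_device *dev)
{
	struct ethtool_coalesce c = {
		.rx_coalesce_usecs	  = 5,	/* ~5us interrupt hold-off */
		.rx_max_coalesced_frames  = 8,	/* or fire after 8 packets */
		.use_adaptive_rx_coalesce = 1,
	};

	return set_coalesce(dev, &c);
}
#endif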

/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual.  The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
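
/* For illustration only: a worked example of the mapping above, assuming
 * @fn = 1 and a hypothetical @sz of 1024 (so A = 1024):
 *
 *   eeprom_ptov(0, 1, 1024)    = 0 + (31 << 10)     = 31744  (31K)
 *   eeprom_ptov(1500, 1, 1024) = 31744 - 1024 + 476 = 31196  (in [30K..31K))
 *   eeprom_ptov(2048, 1, 1024) = 2048 - 1024 - 1024 = 0
 *
 * The real @sz used by the callers below is EEPROMPFSIZE; 1024 is just an
 * assumption to keep the arithmetic short.
 */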

/*
 * The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);
	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->fn > 0) {
		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);
	unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1;

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	/* If the adapter has been fully initialized then we'll go ahead and
	 * try to get the firmware's cooperation in upgrading to the new
	 * firmware image otherwise we'll try to do the entire job from the
	 * host ... and we always "force" the operation in this path.
	 */
	if (adap->flags & FULL_INIT_DONE)
		mbox = adap->mbox;

	ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev,
			 "loaded firmware %s, reload cxgb4 driver\n", ef->data);
	return ret;
}

#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = netdev2adap(dev)->wol;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static u32 get_rss_table_size(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return pi->rss_size;
}

static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
{
	const struct port_info *pi = netdev_priv(dev);
	unsigned int n = pi->rss_size;

	while (n--)
		p[n] = pi->rss[n];
	return 0;
}

static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
{
	unsigned int i;
	struct port_info *pi = netdev_priv(dev);

	for (i = 0; i < pi->rss_size; i++)
		pi->rss[i] = p[i];
	if (pi->adapter->flags & FULL_INIT_DONE)
		return write_rss(pi, pi->rss);
	return 0;
}
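
/* For illustration only: a hedged sketch of filling the RSS indirection
 * table with a round-robin spread over the port's queue sets, roughly the
 * effect of "ethtool -X ethX equal N" from userspace.  The caller-supplied
 * "table" buffer is an assumption; this helper is not part of the driver.
 */
#if 0
static int example_spread_rss(struct net_device *dev, u32 *table)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < pi->rss_size; i++)
		table[i] = i % pi->nqsets;	/* queue set index, 0..nqsets-1 */

	return set_rss_table(dev, table, NULL);
}
#endif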

static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings        = get_settings,
	.set_settings        = set_settings,
	.get_drvinfo         = get_drvinfo,
	.get_msglevel        = get_msglevel,
	.set_msglevel        = set_msglevel,
	.get_ringparam       = get_sge_param,
	.set_ringparam       = set_sge_param,
	.get_coalesce        = get_coalesce,
	.set_coalesce        = set_coalesce,
	.get_eeprom_len      = get_eeprom_len,
	.get_eeprom          = get_eeprom,
	.set_eeprom          = set_eeprom,
	.get_pauseparam      = get_pauseparam,
	.set_pauseparam      = set_pauseparam,
	.get_link            = ethtool_op_get_link,
	.get_strings         = get_strings,
	.set_phys_id         = identify_port,
	.nway_reset          = restart_autoneg,
	.get_sset_count      = get_sset_count,
	.get_ethtool_stats   = get_stats,
	.get_regs_len        = get_regs_len,
	.get_regs            = get_regs,
	.get_wol             = get_wol,
	.set_wol             = set_wol,
	.get_rxnfc           = get_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh            = get_rss_table,
	.set_rxfh            = set_rss_table,
	.flash_device        = set_flash,
};

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);
3166
3167/*
3168 * Release an active-open TID.
3169 */
3170void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3171{
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003172 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003173
3174 spin_lock_bh(&t->atid_lock);
3175 p->next = t->afree;
3176 t->afree = p;
3177 t->atids_in_use--;
3178 spin_unlock_bh(&t->atid_lock);
3179}
3180EXPORT_SYMBOL(cxgb4_free_atid);
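
/* Illustrative sketch, not driver code: how an upper-layer driver might
 * pair the two atid helpers above around a connection attempt.  The names
 * example_act_open() and "my_ep" are hypothetical.
 */
static inline int example_act_open(struct tid_info *t, void *my_ep)
{
	int atid = cxgb4_alloc_atid(t, my_ep);	/* -1 if table exhausted */

	if (atid < 0)
		return -ENOMEM;
	/* ... build and send a CPL active-open request tagged with atid;
	 * on a failed send, return the tag to the free list:
	 */
	cxgb4_free_atid(t, atid);
	return 0;
}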
3181
3182/*
3183 * Allocate a server TID and set it to the supplied value.
3184 */
3185int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3186{
3187 int stid;
3188
3189 spin_lock_bh(&t->stid_lock);
3190 if (family == PF_INET) {
3191 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3192 if (stid < t->nstids)
3193 __set_bit(stid, t->stid_bmap);
3194 else
3195 stid = -1;
3196 } else {
3197 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3198 if (stid < 0)
3199 stid = -1;
3200 }
3201 if (stid >= 0) {
3202 t->stid_tab[stid].data = data;
3203 stid += t->stid_base;
Kumar Sanghvi15f63b72013-12-18 16:38:22 +05303204 /* IPv6 requires a max of 520 bits or 16 cells in the TCAM.
3205 * This is equivalent to 4 TIDs. With CLIP enabled it
3206 * needs 2 TIDs.
3207 */
3208 if (family == PF_INET)
3209 t->stids_in_use++;
3210 else
3211 t->stids_in_use += 4;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003212 }
3213 spin_unlock_bh(&t->stid_lock);
3214 return stid;
3215}
3216EXPORT_SYMBOL(cxgb4_alloc_stid);
3217
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003218/* Allocate a server filter TID and set it to the supplied value.
3219 */
3220int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3221{
3222 int stid;
3223
3224 spin_lock_bh(&t->stid_lock);
3225 if (family == PF_INET) {
3226 stid = find_next_zero_bit(t->stid_bmap,
3227 t->nstids + t->nsftids, t->nstids);
3228 if (stid < (t->nstids + t->nsftids))
3229 __set_bit(stid, t->stid_bmap);
3230 else
3231 stid = -1;
3232 } else {
3233 stid = -1;
3234 }
3235 if (stid >= 0) {
3236 t->stid_tab[stid].data = data;
Kumar Sanghvi470c60c2013-12-18 16:38:21 +05303237 stid -= t->nstids;
3238 stid += t->sftid_base;
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003239 t->stids_in_use++;
3240 }
3241 spin_unlock_bh(&t->stid_lock);
3242 return stid;
3243}
3244EXPORT_SYMBOL(cxgb4_alloc_sftid);
3245
3246/* Release a server TID.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003247 */
3248void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3249{
Kumar Sanghvi470c60c2013-12-18 16:38:21 +05303250 /* Is it a server filter TID? */
3251 if (t->nsftids && (stid >= t->sftid_base)) {
3252 stid -= t->sftid_base;
3253 stid += t->nstids;
3254 } else {
3255 stid -= t->stid_base;
3256 }
3257
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003258 spin_lock_bh(&t->stid_lock);
3259 if (family == PF_INET)
3260 __clear_bit(stid, t->stid_bmap);
3261 else
3262 bitmap_release_region(t->stid_bmap, stid, 2);
3263 t->stid_tab[stid].data = NULL;
Kumar Sanghvi15f63b72013-12-18 16:38:22 +05303264 if (family == PF_INET)
3265 t->stids_in_use--;
3266 else
3267 t->stids_in_use -= 4;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003268 spin_unlock_bh(&t->stid_lock);
3269}
3270EXPORT_SYMBOL(cxgb4_free_stid);
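
/* Illustrative sketch, not driver code: server TID usage.  An IPv4
 * listener consumes a single stid (one bitmap bit) while an IPv6 listener
 * consumes an aligned block of four, matching the TCAM-cell accounting in
 * cxgb4_alloc_stid() above.  example_listen() is a hypothetical caller.
 */
static inline int example_listen(struct tid_info *t, int family, void *srv)
{
	int stid = cxgb4_alloc_stid(t, family, srv);

	if (stid < 0)
		return -ENOMEM;
	/* ... send CPL_PASS_OPEN_REQ/REQ6 for this stid ... */
	cxgb4_free_stid(t, stid, family);	/* on teardown */
	return 0;
}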
3271
3272/*
3273 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3274 */
3275static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3276 unsigned int tid)
3277{
3278 struct cpl_tid_release *req;
3279
3280 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3281 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3282 INIT_TP_WR(req, tid);
3283 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3284}
3285
3286/*
3287 * Queue a TID release request and if necessary schedule a work queue to
3288 * process it.
3289 */
stephen hemminger31b9c192010-10-18 05:39:18 +00003290static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3291 unsigned int tid)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003292{
3293 void **p = &t->tid_tab[tid];
3294 struct adapter *adap = container_of(t, struct adapter, tids);
3295
3296 spin_lock_bh(&adap->tid_release_lock);
3297 *p = adap->tid_release_head;
3298 /* Low 2 bits encode the Tx channel number */
3299 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3300 if (!adap->tid_release_task_busy) {
3301 adap->tid_release_task_busy = true;
Anish Bhatt29aaee62014-08-20 13:44:06 -07003302 queue_work(adap->workq, &adap->tid_release_task);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003303 }
3304 spin_unlock_bh(&adap->tid_release_lock);
3305}
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003306
3307/*
3308 * Process the list of pending TID release requests.
3309 */
3310static void process_tid_release_list(struct work_struct *work)
3311{
3312 struct sk_buff *skb;
3313 struct adapter *adap;
3314
3315 adap = container_of(work, struct adapter, tid_release_task);
3316
3317 spin_lock_bh(&adap->tid_release_lock);
3318 while (adap->tid_release_head) {
3319 void **p = adap->tid_release_head;
3320 unsigned int chan = (uintptr_t)p & 3;
3321 p = (void *)p - chan;
3322
3323 adap->tid_release_head = *p;
3324 *p = NULL;
3325 spin_unlock_bh(&adap->tid_release_lock);
3326
3327 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3328 GFP_KERNEL)))
3329 schedule_timeout_uninterruptible(1);
3330
3331 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3332 t4_ofld_send(adap, skb);
3333 spin_lock_bh(&adap->tid_release_lock);
3334 }
3335 adap->tid_release_task_busy = false;
3336 spin_unlock_bh(&adap->tid_release_lock);
3337}
3338
3339/*
3340 * Release a TID and inform HW. If we are unable to allocate the release
3341 * message, we defer it to a work queue.
3342 */
3343void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3344{
3345 void *old;
3346 struct sk_buff *skb;
3347 struct adapter *adap = container_of(t, struct adapter, tids);
3348
3349 old = t->tid_tab[tid];
3350 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3351 if (likely(skb)) {
3352 t->tid_tab[tid] = NULL;
3353 mk_tid_release(skb, chan, tid);
3354 t4_ofld_send(adap, skb);
3355 } else
3356 cxgb4_queue_tid_release(t, chan, tid);
3357 if (old)
3358 atomic_dec(&t->tids_in_use);
3359}
3360EXPORT_SYMBOL(cxgb4_remove_tid);
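
/* Illustrative sketch, not driver code: the deferred-release path above
 * threads pending entries through tid_tab itself and hides the Tx channel
 * in the low two bits of each link pointer, which is safe because the
 * table entries are pointer-aligned.  The pack/unpack steps look like
 * this (names are hypothetical):
 */
static inline void *example_pack(void **slot, unsigned int chan)
{
	return (void *)((uintptr_t)slot | (chan & 3));
}

static inline void **example_unpack(void *link, unsigned int *chan)
{
	*chan = (uintptr_t)link & 3;
	return (void **)((uintptr_t)link & ~(uintptr_t)3);
}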
3361
3362/*
3363 * Allocate and initialize the TID tables. Returns 0 on success.
3364 */
3365static int tid_init(struct tid_info *t)
3366{
3367 size_t size;
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003368 unsigned int stid_bmap_size;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003369 unsigned int natids = t->natids;
Kumar Sanghvib6f8eae2013-12-18 16:38:19 +05303370 struct adapter *adap = container_of(t, struct adapter, tids);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003371
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003372 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003373 size = t->ntids * sizeof(*t->tid_tab) +
3374 natids * sizeof(*t->atid_tab) +
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003375 t->nstids * sizeof(*t->stid_tab) +
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003376 t->nsftids * sizeof(*t->stid_tab) +
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003377 stid_bmap_size * sizeof(long) +
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003378 t->nftids * sizeof(*t->ftid_tab) +
3379 t->nsftids * sizeof(*t->ftid_tab);
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003380
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003381 t->tid_tab = t4_alloc_mem(size);
3382 if (!t->tid_tab)
3383 return -ENOMEM;
3384
3385 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3386 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003387 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003388 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003389 spin_lock_init(&t->stid_lock);
3390 spin_lock_init(&t->atid_lock);
3391
3392 t->stids_in_use = 0;
3393 t->afree = NULL;
3394 t->atids_in_use = 0;
3395 atomic_set(&t->tids_in_use, 0);
3396
3397 /* Setup the free list for atid_tab and clear the stid bitmap. */
3398 if (natids) {
3399 while (--natids)
3400 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3401 t->afree = t->atid_tab;
3402 }
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003403 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
Kumar Sanghvib6f8eae2013-12-18 16:38:19 +05303404 /* Reserve stid 0 for T4/T5 adapters */
3405 if (!t->stid_base &&
3406 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3407 __set_bit(0, t->stid_bmap);
3408
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003409 return 0;
3410}
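
/* For reference, the single t4_alloc_mem() in tid_init() is carved up in
 * this order, so freeing t->tid_tab releases everything at once:
 *
 *	tid_tab[ntids]			connection TIDs
 *	atid_tab[natids]		active-open TIDs (free list)
 *	stid_tab[nstids + nsftids]	server TIDs
 *	stid_bmap[]			server TID allocation bitmap
 *	ftid_tab[nftids + nsftids]	filter entries
 */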
3411
Anish Bhatta3e3b282014-07-17 00:18:16 -07003412int cxgb4_clip_get(const struct net_device *dev,
3413 const struct in6_addr *lip)
Vipul Pandya01bcca62013-07-04 16:10:46 +05303414{
3415 struct adapter *adap;
3416 struct fw_clip_cmd c;
3417
3418 adap = netdev2adap(dev);
3419 memset(&c, 0, sizeof(c));
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05303420 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3421 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
Vipul Pandya01bcca62013-07-04 16:10:46 +05303422 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
Joe Perches12f2a472014-03-24 10:45:12 -07003423 c.ip_hi = *(__be64 *)(lip->s6_addr);
3424 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
Vipul Pandya01bcca62013-07-04 16:10:46 +05303425 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3426}
Anish Bhatta3e3b282014-07-17 00:18:16 -07003427EXPORT_SYMBOL(cxgb4_clip_get);
Vipul Pandya01bcca62013-07-04 16:10:46 +05303428
Anish Bhatta3e3b282014-07-17 00:18:16 -07003429int cxgb4_clip_release(const struct net_device *dev,
3430 const struct in6_addr *lip)
Vipul Pandya01bcca62013-07-04 16:10:46 +05303431{
3432 struct adapter *adap;
3433 struct fw_clip_cmd c;
3434
3435 adap = netdev2adap(dev);
3436 memset(&c, 0, sizeof(c));
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05303437 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3438 FW_CMD_REQUEST_F | FW_CMD_READ_F);
Vipul Pandya01bcca62013-07-04 16:10:46 +05303439 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
Joe Perches12f2a472014-03-24 10:45:12 -07003440 c.ip_hi = *(__be64 *)(lip->s6_addr);
3441 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
Vipul Pandya01bcca62013-07-04 16:10:46 +05303442 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3443}
Anish Bhatta3e3b282014-07-17 00:18:16 -07003444EXPORT_SYMBOL(cxgb4_clip_release);
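
/* Illustrative sketch, not driver code: installing and removing a local
 * IPv6 address in the hardware CLIP table.  "dev" must be one of this
 * driver's net devices; example_clip() is hypothetical.
 */
static inline int example_clip(const struct net_device *dev,
			       const struct in6_addr *addr)
{
	int ret = cxgb4_clip_get(dev, addr);	/* FW_CLIP_CMD alloc */

	if (ret < 0)
		return ret;
	/* ... address usable for offloaded IPv6 connections ... */
	return cxgb4_clip_release(dev, addr);	/* FW_CLIP_CMD free */
}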
Vipul Pandya01bcca62013-07-04 16:10:46 +05303445
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003446/**
3447 * cxgb4_create_server - create an IP server
3448 * @dev: the device
3449 * @stid: the server TID
3450 * @sip: local IP address to bind server to
3451 * @sport: the server's TCP port
 * @vlan: VLAN tag to match (currently unused by this function)
3452 * @queue: queue to direct messages from this server to
3453 *
3454 * Create an IP server for the given port and address.
3455 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3456 */
3457int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
Vipul Pandya793dad92012-12-10 09:30:56 +00003458 __be32 sip, __be16 sport, __be16 vlan,
3459 unsigned int queue)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003460{
3461 unsigned int chan;
3462 struct sk_buff *skb;
3463 struct adapter *adap;
3464 struct cpl_pass_open_req *req;
Vipul Pandya80f40c12013-07-04 16:10:45 +05303465 int ret;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003466
3467 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3468 if (!skb)
3469 return -ENOMEM;
3470
3471 adap = netdev2adap(dev);
3472 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3473 INIT_TP_WR(req, 0);
3474 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3475 req->local_port = sport;
3476 req->peer_port = htons(0);
3477 req->local_ip = sip;
3478 req->peer_ip = htonl(0);
Dimitris Michailidise46dab42010-08-23 17:20:58 +00003479 chan = rxq_to_chan(&adap->sge, queue);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003480 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3481 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3482 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
Vipul Pandya80f40c12013-07-04 16:10:45 +05303483 ret = t4_mgmt_tx(adap, skb);
3484 return net_xmit_eval(ret);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003485}
3486EXPORT_SYMBOL(cxgb4_create_server);
3487
Vipul Pandya80f40c12013-07-04 16:10:45 +05303488/* cxgb4_create_server6 - create an IPv6 server
3489 * @dev: the device
3490 * @stid: the server TID
3491 * @sip: local IPv6 address to bind server to
3492 * @sport: the server's TCP port
3493 * @queue: queue to direct messages from this server to
3494 *
3495 * Create an IPv6 server for the given port and address.
3496 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3497 */
3498int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3499 const struct in6_addr *sip, __be16 sport,
3500 unsigned int queue)
3501{
3502 unsigned int chan;
3503 struct sk_buff *skb;
3504 struct adapter *adap;
3505 struct cpl_pass_open_req6 *req;
3506 int ret;
3507
3508 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3509 if (!skb)
3510 return -ENOMEM;
3511
3512 adap = netdev2adap(dev);
3513 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3514 INIT_TP_WR(req, 0);
3515 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3516 req->local_port = sport;
3517 req->peer_port = htons(0);
3518 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3519 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3520 req->peer_ip_hi = cpu_to_be64(0);
3521 req->peer_ip_lo = cpu_to_be64(0);
3522 chan = rxq_to_chan(&adap->sge, queue);
3523 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3524 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3525 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3526 ret = t4_mgmt_tx(adap, skb);
3527 return net_xmit_eval(ret);
3528}
3529EXPORT_SYMBOL(cxgb4_create_server6);
3530
3531int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3532 unsigned int queue, bool ipv6)
3533{
3534 struct sk_buff *skb;
3535 struct adapter *adap;
3536 struct cpl_close_listsvr_req *req;
3537 int ret;
3538
3539 adap = netdev2adap(dev);
3540
3541 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3542 if (!skb)
3543 return -ENOMEM;
3544
3545 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3546 INIT_TP_WR(req, 0);
3547 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3548 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3549 LISTSVR_IPV6(0)) | QUEUENO(queue));
3550 ret = t4_mgmt_tx(adap, skb);
3551 return net_xmit_eval(ret);
3552}
3553EXPORT_SYMBOL(cxgb4_remove_server);
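
/* Illustrative sketch, not driver code: a hypothetical IPv4 listener
 * lifecycle tying the stid allocator to the server CPL helpers above.
 * Error handling is compressed for brevity.
 */
static inline int example_ipv4_listener(struct net_device *dev,
					struct tid_info *t, void *srv,
					__be32 sip, __be16 sport,
					unsigned int rxq)
{
	int stid = cxgb4_alloc_stid(t, PF_INET, srv);

	if (stid < 0)
		return -ENOMEM;
	if (cxgb4_create_server(dev, stid, sip, sport, 0, rxq) < 0) {
		cxgb4_free_stid(t, stid, PF_INET);
		return -EIO;
	}
	/* ... listener active; later: */
	cxgb4_remove_server(dev, stid, rxq, false);
	cxgb4_free_stid(t, stid, PF_INET);
	return 0;
}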
3554
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003555/**
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003556 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3557 * @mtus: the HW MTU table
3558 * @mtu: the target MTU
3559 * @idx: index of selected entry in the MTU table
3560 *
3561 * Returns the index and the value in the HW MTU table that is closest to
3562 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3563 * table, in which case that smallest available value is selected.
3564 */
3565unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3566 unsigned int *idx)
3567{
3568 unsigned int i = 0;
3569
3570 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3571 ++i;
3572 if (idx)
3573 *idx = i;
3574 return mtus[i];
3575}
3576EXPORT_SYMBOL(cxgb4_best_mtu);
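
/* Illustrative sketch, not driver code: picking the best entry for a
 * standard 1500-byte path MTU.  "mtus" would typically be the
 * adap->params.mtus table handed to ULDs via lli.mtus.
 */
static inline unsigned int example_pick_mtu(const unsigned short *mtus)
{
	unsigned int idx;
	unsigned int mtu = cxgb4_best_mtu(mtus, 1500, &idx);

	/* mtu <= 1500 unless every table entry exceeds 1500, in which
	 * case the smallest entry (index 0) is returned instead.
	 */
	return mtu;
}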
3577
3578/**
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05303579 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3580 * @mtus: the HW MTU table
3581 * @header_size: Header Size
3582 * @data_size_max: maximum Data Segment Size
3583 * @data_size_align: desired Data Segment Size Alignment (2^N)
3584 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3585 *
3586 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3587 * MTU Table based solely on a Maximum MTU parameter, we break that
3588 * parameter up into a Header Size and Maximum Data Segment Size, and
3589 * provide a desired Data Segment Size Alignment. If we find an MTU in
3590 * the Hardware MTU Table which will result in a Data Segment Size with
3591 * the requested alignment _and_ that MTU isn't "too far" from the
3592 * closest MTU, then we'll return that rather than the closest MTU.
3593 */
3594unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3595 unsigned short header_size,
3596 unsigned short data_size_max,
3597 unsigned short data_size_align,
3598 unsigned int *mtu_idxp)
3599{
3600 unsigned short max_mtu = header_size + data_size_max;
3601 unsigned short data_size_align_mask = data_size_align - 1;
3602 int mtu_idx, aligned_mtu_idx;
3603
3604 /* Scan the MTU Table till we find an MTU which is larger than our
3605 * Maximum MTU or we reach the end of the table. Along the way,
3606 * record the last MTU found, if any, which will result in a Data
3607 * Segment Length matching the requested alignment.
3608 */
3609 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3610 unsigned short data_size = mtus[mtu_idx] - header_size;
3611
3612 /* If this MTU minus the Header Size would result in a
3613 * Data Segment Size of the desired alignment, remember it.
3614 */
3615 if ((data_size & data_size_align_mask) == 0)
3616 aligned_mtu_idx = mtu_idx;
3617
3618 /* If we're not at the end of the Hardware MTU Table and the
3619 * next element is larger than our Maximum MTU, drop out of
3620 * the loop.
3621 */
3622 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3623 break;
3624 }
3625
3626 /* If we fell out of the loop because we ran to the end of the table,
3627 * then we just have to use the last [largest] entry.
3628 */
3629 if (mtu_idx == NMTUS)
3630 mtu_idx--;
3631
3632 /* If we found an MTU which resulted in the requested Data Segment
3633 * Length alignment and that's "not far" from the largest MTU which is
3634 * less than or equal to the maximum MTU, then use that.
3635 */
3636 if (aligned_mtu_idx >= 0 &&
3637 mtu_idx - aligned_mtu_idx <= 1)
3638 mtu_idx = aligned_mtu_idx;
3639
3640 /* If the caller has passed in an MTU Index pointer, pass the
3641 * MTU Index back. Return the MTU value.
3642 */
3643 if (mtu_idxp)
3644 *mtu_idxp = mtu_idx;
3645 return mtus[mtu_idx];
3646}
3647EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
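
/* Illustrative sketch, not driver code: how an iSCSI-style ULD might call
 * the aligned search.  The 48-byte header and 512-byte alignment are made
 * up for the example.
 */
static inline unsigned int example_aligned_mtu(const unsigned short *mtus)
{
	unsigned int mtu_idx;

	return cxgb4_best_aligned_mtu(mtus,
				      48,	/* header_size */
				      8192,	/* data_size_max */
				      512,	/* data_size_align (2^N) */
				      &mtu_idx);
}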
3648
3649/**
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003650 * cxgb4_port_chan - get the HW channel of a port
3651 * @dev: the net device for the port
3652 *
3653 * Return the HW Tx channel of the given port.
3654 */
3655unsigned int cxgb4_port_chan(const struct net_device *dev)
3656{
3657 return netdev2pinfo(dev)->tx_chan;
3658}
3659EXPORT_SYMBOL(cxgb4_port_chan);
3660
Vipul Pandya881806b2012-05-18 15:29:24 +05303661unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3662{
3663 struct adapter *adap = netdev2adap(dev);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003664 u32 v1, v2, lp_count, hp_count;
Vipul Pandya881806b2012-05-18 15:29:24 +05303665
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003666 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3667 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303668 if (is_t4(adap->params.chip)) {
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003669 lp_count = G_LP_COUNT(v1);
3670 hp_count = G_HP_COUNT(v1);
3671 } else {
3672 lp_count = G_LP_COUNT_T5(v1);
3673 hp_count = G_HP_COUNT_T5(v2);
3674 }
3675 return lpfifo ? lp_count : hp_count;
Vipul Pandya881806b2012-05-18 15:29:24 +05303676}
3677EXPORT_SYMBOL(cxgb4_dbfifo_count);
3678
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003679/**
3680 * cxgb4_port_viid - get the VI id of a port
3681 * @dev: the net device for the port
3682 *
3683 * Return the VI id of the given port.
3684 */
3685unsigned int cxgb4_port_viid(const struct net_device *dev)
3686{
3687 return netdev2pinfo(dev)->viid;
3688}
3689EXPORT_SYMBOL(cxgb4_port_viid);
3690
3691/**
3692 * cxgb4_port_idx - get the index of a port
3693 * @dev: the net device for the port
3694 *
3695 * Return the index of the given port.
3696 */
3697unsigned int cxgb4_port_idx(const struct net_device *dev)
3698{
3699 return netdev2pinfo(dev)->port_id;
3700}
3701EXPORT_SYMBOL(cxgb4_port_idx);
3702
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003703void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3704 struct tp_tcp_stats *v6)
3705{
3706 struct adapter *adap = pci_get_drvdata(pdev);
3707
3708 spin_lock(&adap->stats_lock);
3709 t4_tp_get_tcp_stats(adap, v4, v6);
3710 spin_unlock(&adap->stats_lock);
3711}
3712EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3713
3714void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3715 const unsigned int *pgsz_order)
3716{
3717 struct adapter *adap = netdev2adap(dev);
3718
3719 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3720 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3721 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3722 HPZ3(pgsz_order[3]));
3723}
3724EXPORT_SYMBOL(cxgb4_iscsi_init);
3725
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303726int cxgb4_flush_eq_cache(struct net_device *dev)
3727{
3728 struct adapter *adap = netdev2adap(dev);
3729 int ret;
3730
3731 ret = t4_fwaddrspace_write(adap, adap->mbox,
3732 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3733 return ret;
3734}
3735EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3736
3737static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3738{
3739 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3740 __be64 indices;
3741 int ret;
3742
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05303743 spin_lock(&adap->win0_lock);
3744 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3745 sizeof(indices), (__be32 *)&indices,
3746 T4_MEMORY_READ);
3747 spin_unlock(&adap->win0_lock);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303748 if (!ret) {
Vipul Pandya404d9e32012-10-08 02:59:43 +00003749 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3750 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303751 }
3752 return ret;
3753}
3754
3755int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3756 u16 size)
3757{
3758 struct adapter *adap = netdev2adap(dev);
3759 u16 hw_pidx, hw_cidx;
3760 int ret;
3761
3762 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3763 if (ret)
3764 goto out;
3765
3766 if (pidx != hw_pidx) {
3767 u16 delta;
3768
3769 if (pidx >= hw_pidx)
3770 delta = pidx - hw_pidx;
3771 else
3772 delta = size - hw_pidx + pidx;
3773 wmb();
Vipul Pandya840f3002012-09-05 02:01:55 +00003774 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3775 QID(qid) | PIDX(delta));
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303776 }
3777out:
3778 return ret;
3779}
3780EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
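
/* Illustrative sketch, not driver code: the producer-index delta written
 * to the doorbell above is simply subtraction modulo the ring size, since
 * the hardware takes an increment rather than an absolute index.
 */
static inline u16 example_pidx_delta(u16 sw_pidx, u16 hw_pidx, u16 size)
{
	return sw_pidx >= hw_pidx ? sw_pidx - hw_pidx
				  : size - hw_pidx + sw_pidx;
}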
3781
Vipul Pandya3cbdb922013-03-14 05:08:59 +00003782void cxgb4_disable_db_coalescing(struct net_device *dev)
3783{
3784 struct adapter *adap;
3785
3786 adap = netdev2adap(dev);
3787 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3788 F_NOCOALESCE);
3789}
3790EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3791
3792void cxgb4_enable_db_coalescing(struct net_device *dev)
3793{
3794 struct adapter *adap;
3795
3796 adap = netdev2adap(dev);
3797 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3798}
3799EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3800
Hariprasad Shenai031cf472014-07-14 21:34:53 +05303801int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3802{
3803 struct adapter *adap;
3804 u32 offset, memtype, memaddr;
Hariprasad Shenai6559a7e2014-11-07 09:35:24 +05303805 u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
Hariprasad Shenai031cf472014-07-14 21:34:53 +05303806 u32 edc0_end, edc1_end, mc0_end, mc1_end;
3807 int ret;
3808
3809 adap = netdev2adap(dev);
3810
3811 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
3812
3813 /* Figure out where the offset lands in the Memory Type/Address scheme.
3814 * This code assumes that the memory is laid out starting at offset 0
3815 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3816 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
3817 * MC0, and some have both MC0 and MC1.
3818 */
Hariprasad Shenai6559a7e2014-11-07 09:35:24 +05303819 size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
3820 edc0_size = EDRAM0_SIZE_G(size) << 20;
3821 size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
3822 edc1_size = EDRAM1_SIZE_G(size) << 20;
3823 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
3824 mc0_size = EXT_MEM0_SIZE_G(size) << 20;
Hariprasad Shenai031cf472014-07-14 21:34:53 +05303825
3826 edc0_end = edc0_size;
3827 edc1_end = edc0_end + edc1_size;
3828 mc0_end = edc1_end + mc0_size;
3829
3830 if (offset < edc0_end) {
3831 memtype = MEM_EDC0;
3832 memaddr = offset;
3833 } else if (offset < edc1_end) {
3834 memtype = MEM_EDC1;
3835 memaddr = offset - edc0_end;
3836 } else {
3837 if (offset < mc0_end) {
3838 memtype = MEM_MC0;
3839 memaddr = offset - edc1_end;
3840 } else if (is_t4(adap->params.chip)) {
3841 /* T4 only has a single memory channel */
3842 goto err;
3843 } else {
Hariprasad Shenai6559a7e2014-11-07 09:35:24 +05303844 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
3845 mc1_size = EXT_MEM1_SIZE_G(size) << 20;
Hariprasad Shenai031cf472014-07-14 21:34:53 +05303846 mc1_end = mc0_end + mc1_size;
3847 if (offset < mc1_end) {
3848 memtype = MEM_MC1;
3849 memaddr = offset - mc0_end;
3850 } else {
3851 /* offset beyond the end of any memory */
3852 goto err;
3853 }
3854 }
3855 }
3856
3857 spin_lock(&adap->win0_lock);
3858 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3859 spin_unlock(&adap->win0_lock);
3860 return ret;
3861
3862err:
3863 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3864 stag, offset);
3865 return -EINVAL;
3866}
3867EXPORT_SYMBOL(cxgb4_read_tpte);
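
/* Illustrative sketch, not driver code: a TPTE is 32 bytes, so callers
 * supply an eight-word buffer.  example_read_tpte() is hypothetical.
 */
static inline int example_read_tpte(struct net_device *dev, u32 stag)
{
	__be32 tpte[8];		/* 32 bytes, matching the read above */

	return cxgb4_read_tpte(dev, stag, tpte);
}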
3868
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +05303869u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3870{
3871 u32 hi, lo;
3872 struct adapter *adap;
3873
3874 adap = netdev2adap(dev);
3875 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
3876 hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
3877
3878 return ((u64)hi << 32) | (u64)lo;
3879}
3880EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3881
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003882static struct pci_driver cxgb4_driver;
3883
3884static void check_neigh_update(struct neighbour *neigh)
3885{
3886 const struct device *parent;
3887 const struct net_device *netdev = neigh->dev;
3888
3889 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3890 netdev = vlan_dev_real_dev(netdev);
3891 parent = netdev->dev.parent;
3892 if (parent && parent->driver == &cxgb4_driver.driver)
3893 t4_l2t_update(dev_get_drvdata(parent), neigh);
3894}
3895
3896static int netevent_cb(struct notifier_block *nb, unsigned long event,
3897 void *data)
3898{
3899 switch (event) {
3900 case NETEVENT_NEIGH_UPDATE:
3901 check_neigh_update(data);
3902 break;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003903 case NETEVENT_REDIRECT:
3904 default:
3905 break;
3906 }
3907 return 0;
3908}
3909
3910static bool netevent_registered;
3911static struct notifier_block cxgb4_netevent_nb = {
3912 .notifier_call = netevent_cb
3913};
3914
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303915static void drain_db_fifo(struct adapter *adap, int usecs)
3916{
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003917 u32 v1, v2, lp_count, hp_count;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303918
3919 do {
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003920 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3921 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303922 if (is_t4(adap->params.chip)) {
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00003923 lp_count = G_LP_COUNT(v1);
3924 hp_count = G_HP_COUNT(v1);
3925 } else {
3926 lp_count = G_LP_COUNT_T5(v1);
3927 hp_count = G_HP_COUNT_T5(v2);
3928 }
3929
3930 if (lp_count == 0 && hp_count == 0)
3931 break;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303932 set_current_state(TASK_UNINTERRUPTIBLE);
3933 schedule_timeout(usecs_to_jiffies(usecs));
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303934 } while (1);
3935}
3936
3937static void disable_txq_db(struct sge_txq *q)
3938{
Steve Wise05eb2382014-03-14 21:52:08 +05303939 unsigned long flags;
3940
3941 spin_lock_irqsave(&q->db_lock, flags);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303942 q->db_disabled = 1;
Steve Wise05eb2382014-03-14 21:52:08 +05303943 spin_unlock_irqrestore(&q->db_lock, flags);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303944}
3945
Steve Wise05eb2382014-03-14 21:52:08 +05303946static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303947{
3948 spin_lock_irq(&q->db_lock);
Steve Wise05eb2382014-03-14 21:52:08 +05303949 if (q->db_pidx_inc) {
3950 /* Make sure that all writes to the TX descriptors
3951 * are committed before we tell HW about them.
3952 */
3953 wmb();
3954 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3955 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
3956 q->db_pidx_inc = 0;
3957 }
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303958 q->db_disabled = 0;
3959 spin_unlock_irq(&q->db_lock);
3960}
3961
3962static void disable_dbs(struct adapter *adap)
3963{
3964 int i;
3965
3966 for_each_ethrxq(&adap->sge, i)
3967 disable_txq_db(&adap->sge.ethtxq[i].q);
3968 for_each_ofldrxq(&adap->sge, i)
3969 disable_txq_db(&adap->sge.ofldtxq[i].q);
3970 for_each_port(adap, i)
3971 disable_txq_db(&adap->sge.ctrlq[i].q);
3972}
3973
3974static void enable_dbs(struct adapter *adap)
3975{
3976 int i;
3977
3978 for_each_ethrxq(&adap->sge, i)
Steve Wise05eb2382014-03-14 21:52:08 +05303979 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303980 for_each_ofldrxq(&adap->sge, i)
Steve Wise05eb2382014-03-14 21:52:08 +05303981 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303982 for_each_port(adap, i)
Steve Wise05eb2382014-03-14 21:52:08 +05303983 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
3984}
3985
3986static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3987{
3988 if (adap->uld_handle[CXGB4_ULD_RDMA])
3989 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3990 cmd);
3991}
3992
3993static void process_db_full(struct work_struct *work)
3994{
3995 struct adapter *adap;
3996
3997 adap = container_of(work, struct adapter, db_full_task);
3998
3999 drain_db_fifo(adap, dbfifo_drain_delay);
4000 enable_dbs(adap);
4001 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4002 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4003 DBFIFO_HP_INT | DBFIFO_LP_INT,
4004 DBFIFO_HP_INT | DBFIFO_LP_INT);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05304005}
4006
4007static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
4008{
4009 u16 hw_pidx, hw_cidx;
4010 int ret;
4011
Steve Wise05eb2382014-03-14 21:52:08 +05304012 spin_lock_irq(&q->db_lock);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05304013 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
4014 if (ret)
4015 goto out;
4016 if (q->db_pidx != hw_pidx) {
4017 u16 delta;
4018
4019 if (q->db_pidx >= hw_pidx)
4020 delta = q->db_pidx - hw_pidx;
4021 else
4022 delta = q->size - hw_pidx + q->db_pidx;
4023 wmb();
Vipul Pandya840f3002012-09-05 02:01:55 +00004024 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4025 QID(q->cntxt_id) | PIDX(delta));
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05304026 }
4027out:
4028 q->db_disabled = 0;
Steve Wise05eb2382014-03-14 21:52:08 +05304029 q->db_pidx_inc = 0;
4030 spin_unlock_irq(&q->db_lock);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05304031 if (ret)
4032 CH_WARN(adap, "DB drop recovery failed.\n");
4033}

4034static void recover_all_queues(struct adapter *adap)
4035{
4036 int i;
4037
4038 for_each_ethrxq(&adap->sge, i)
4039 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
4040 for_each_ofldrxq(&adap->sge, i)
4041 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
4042 for_each_port(adap, i)
4043 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
4044}
4045
Vipul Pandya881806b2012-05-18 15:29:24 +05304046static void process_db_drop(struct work_struct *work)
4047{
4048 struct adapter *adap;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05304049
Vipul Pandya881806b2012-05-18 15:29:24 +05304050 adap = container_of(work, struct adapter, db_drop_task);
4051
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05304052 if (is_t4(adap->params.chip)) {
Steve Wise05eb2382014-03-14 21:52:08 +05304053 drain_db_fifo(adap, dbfifo_drain_delay);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00004054 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
Steve Wise05eb2382014-03-14 21:52:08 +05304055 drain_db_fifo(adap, dbfifo_drain_delay);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00004056 recover_all_queues(adap);
Steve Wise05eb2382014-03-14 21:52:08 +05304057 drain_db_fifo(adap, dbfifo_drain_delay);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00004058 enable_dbs(adap);
Steve Wise05eb2382014-03-14 21:52:08 +05304059 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00004060 } else {
4061 u32 dropped_db = t4_read_reg(adap, 0x010ac);
4062 u16 qid = (dropped_db >> 15) & 0x1ffff;
4063 u16 pidx_inc = dropped_db & 0x1fff;
4064 unsigned int s_qpp;
4065 unsigned short udb_density;
4066 unsigned long qpshift;
4067 int page;
4068 u32 udb;
4069
4070 dev_warn(adap->pdev_dev,
4071 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
4072 dropped_db, qid,
4073 (dropped_db >> 14) & 1,
4074 (dropped_db >> 13) & 1,
4075 pidx_inc);
4076
4077 drain_db_fifo(adap, 1);
4078
4079 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
4080 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
4081 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
4082 qpshift = PAGE_SHIFT - ilog2(udb_density);
4083 udb = qid << qpshift;
4084 udb &= PAGE_MASK;
4085 page = udb / PAGE_SIZE;
4086 udb += (qid - (page * udb_density)) * 128;
4087
4088 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
4089
4090 /* Re-enable BAR2 WC */
4091 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4092 }
4093
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05304094 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
Vipul Pandya881806b2012-05-18 15:29:24 +05304095}
4096
4097void t4_db_full(struct adapter *adap)
4098{
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05304099 if (is_t4(adap->params.chip)) {
Steve Wise05eb2382014-03-14 21:52:08 +05304100 disable_dbs(adap);
4101 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00004102 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4103 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
Anish Bhatt29aaee62014-08-20 13:44:06 -07004104 queue_work(adap->workq, &adap->db_full_task);
Santosh Rastapur2cc301d2013-03-14 05:08:52 +00004105 }
Vipul Pandya881806b2012-05-18 15:29:24 +05304106}
4107
4108void t4_db_dropped(struct adapter *adap)
4109{
Steve Wise05eb2382014-03-14 21:52:08 +05304110 if (is_t4(adap->params.chip)) {
4111 disable_dbs(adap);
4112 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4113 }
Anish Bhatt29aaee62014-08-20 13:44:06 -07004114 queue_work(adap->workq, &adap->db_drop_task);
Vipul Pandya881806b2012-05-18 15:29:24 +05304115}
4116
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004117static void uld_attach(struct adapter *adap, unsigned int uld)
4118{
4119 void *handle;
4120 struct cxgb4_lld_info lli;
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004121 unsigned short i;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004122
4123 lli.pdev = adap->pdev;
Hariprasad Shenai35b1de52014-06-27 19:23:47 +05304124 lli.pf = adap->fn;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004125 lli.l2t = adap->l2t;
4126 lli.tids = &adap->tids;
4127 lli.ports = adap->port;
4128 lli.vr = &adap->vres;
4129 lli.mtus = adap->params.mtus;
4130 if (uld == CXGB4_ULD_RDMA) {
4131 lli.rxq_ids = adap->sge.rdma_rxq;
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05304132 lli.ciq_ids = adap->sge.rdma_ciq;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004133 lli.nrxq = adap->sge.rdmaqs;
Hariprasad Shenaicf38be62014-06-06 21:40:42 +05304134 lli.nciq = adap->sge.rdmaciqs;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004135 } else if (uld == CXGB4_ULD_ISCSI) {
4136 lli.rxq_ids = adap->sge.ofld_rxq;
4137 lli.nrxq = adap->sge.ofldqsets;
4138 }
4139 lli.ntxq = adap->sge.ofldqsets;
4140 lli.nchan = adap->params.nports;
4141 lli.nports = adap->params.nports;
4142 lli.wr_cred = adap->params.ofldq_wr_cred;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05304143 lli.adapter_type = adap->params.chip;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004144 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +05304145 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004146 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004147 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
4148 (adap->fn * 4));
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004149 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004150 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
4151 (adap->fn * 4));
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05304152 lli.filt_mode = adap->params.tp.vlan_pri_map;
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004153 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
4154 for (i = 0; i < NCHAN; i++)
4155 lli.tx_modq[i] = i;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004156 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
4157 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
4158 lli.fw_vers = adap->params.fw_vers;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05304159 lli.dbfifo_int_thresh = dbfifo_int_thresh;
Hariprasad Shenai04e10e22014-07-14 21:34:51 +05304160 lli.sge_ingpadboundary = adap->sge.fl_align;
4161 lli.sge_egrstatuspagesize = adap->sge.stat_len;
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004162 lli.sge_pktshift = adap->sge.pktshift;
4163 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05304164 lli.max_ordird_qp = adap->params.max_ordird_qp;
4165 lli.max_ird_adapter = adap->params.max_ird_adapter;
Kumar Sanghvi1ac0f092014-02-18 17:56:12 +05304166 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004167
4168 handle = ulds[uld].add(&lli);
4169 if (IS_ERR(handle)) {
4170 dev_warn(adap->pdev_dev,
4171 "could not attach to the %s driver, error %ld\n",
4172 uld_str[uld], PTR_ERR(handle));
4173 return;
4174 }
4175
4176 adap->uld_handle[uld] = handle;
4177
4178 if (!netevent_registered) {
4179 register_netevent_notifier(&cxgb4_netevent_nb);
4180 netevent_registered = true;
4181 }
Dimitris Michailidise29f5db2010-05-18 10:07:13 +00004182
4183 if (adap->flags & FULL_INIT_DONE)
4184 ulds[uld].state_change(handle, CXGB4_STATE_UP);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004185}
4186
4187static void attach_ulds(struct adapter *adap)
4188{
4189 unsigned int i;
4190
Vipul Pandya01bcca62013-07-04 16:10:46 +05304191 spin_lock(&adap_rcu_lock);
4192 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
4193 spin_unlock(&adap_rcu_lock);
4194
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004195 mutex_lock(&uld_mutex);
4196 list_add_tail(&adap->list_node, &adapter_list);
4197 for (i = 0; i < CXGB4_ULD_MAX; i++)
4198 if (ulds[i].add)
4199 uld_attach(adap, i);
4200 mutex_unlock(&uld_mutex);
4201}
4202
4203static void detach_ulds(struct adapter *adap)
4204{
4205 unsigned int i;
4206
4207 mutex_lock(&uld_mutex);
4208 list_del(&adap->list_node);
4209 for (i = 0; i < CXGB4_ULD_MAX; i++)
4210 if (adap->uld_handle[i]) {
4211 ulds[i].state_change(adap->uld_handle[i],
4212 CXGB4_STATE_DETACH);
4213 adap->uld_handle[i] = NULL;
4214 }
4215 if (netevent_registered && list_empty(&adapter_list)) {
4216 unregister_netevent_notifier(&cxgb4_netevent_nb);
4217 netevent_registered = false;
4218 }
4219 mutex_unlock(&uld_mutex);
Vipul Pandya01bcca62013-07-04 16:10:46 +05304220
4221 spin_lock(&adap_rcu_lock);
4222 list_del_rcu(&adap->rcu_node);
4223 spin_unlock(&adap_rcu_lock);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004224}
4225
4226static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4227{
4228 unsigned int i;
4229
4230 mutex_lock(&uld_mutex);
4231 for (i = 0; i < CXGB4_ULD_MAX; i++)
4232 if (adap->uld_handle[i])
4233 ulds[i].state_change(adap->uld_handle[i], new_state);
4234 mutex_unlock(&uld_mutex);
4235}
4236
4237/**
4238 * cxgb4_register_uld - register an upper-layer driver
4239 * @type: the ULD type
4240 * @p: the ULD methods
4241 *
4242 * Registers an upper-layer driver with this driver and notifies the ULD
4243 * about any presently available devices that support its type. Returns
4244 * %-EBUSY if a ULD of the same type is already registered.
4245 */
4246int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4247{
4248 int ret = 0;
4249 struct adapter *adap;
4250
4251 if (type >= CXGB4_ULD_MAX)
4252 return -EINVAL;
4253 mutex_lock(&uld_mutex);
4254 if (ulds[type].add) {
4255 ret = -EBUSY;
4256 goto out;
4257 }
4258 ulds[type] = *p;
4259 list_for_each_entry(adap, &adapter_list, list_node)
4260 uld_attach(adap, type);
4261out: mutex_unlock(&uld_mutex);
4262 return ret;
4263}
4264EXPORT_SYMBOL(cxgb4_register_uld);
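
/* Illustrative sketch, not driver code: the minimum a hypothetical ULD
 * needs in order to register, assuming only the .add and .state_change
 * methods exercised in this file.  All my_uld_* names are made up.
 */
static void *my_uld_add(const struct cxgb4_lld_info *lli)
{
	/* Allocate per-adapter state keyed by lli->pdev; returning an
	 * ERR_PTR() makes uld_attach() log the failure and skip us.
	 */
	return ERR_PTR(-ENOMEM);
}

static int my_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	return 0;
}

static struct cxgb4_uld_info my_uld_info = {
	.add = my_uld_add,
	.state_change = my_uld_state_change,
};

/* Module init would call cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info)
 * and module exit cxgb4_unregister_uld(CXGB4_ULD_ISCSI).
 */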
4265
4266/**
4267 * cxgb4_unregister_uld - unregister an upper-layer driver
4268 * @type: the ULD type
4269 *
4270 * Unregisters an existing upper-layer driver.
4271 */
4272int cxgb4_unregister_uld(enum cxgb4_uld type)
4273{
4274 struct adapter *adap;
4275
4276 if (type >= CXGB4_ULD_MAX)
4277 return -EINVAL;
4278 mutex_lock(&uld_mutex);
4279 list_for_each_entry(adap, &adapter_list, list_node)
4280 adap->uld_handle[type] = NULL;
4281 ulds[type].add = NULL;
4282 mutex_unlock(&uld_mutex);
4283 return 0;
4284}
4285EXPORT_SYMBOL(cxgb4_unregister_uld);
4286
Vipul Pandya01bcca62013-07-04 16:10:46 +05304287/* Check if the netdev on which the event occurred belongs to us or not.
Li RongQingee9a33b2014-06-20 17:32:36 +08004288 * Return true if it does, false otherwise.
4289 * Called with rcu_read_lock() held.
Vipul Pandya01bcca62013-07-04 16:10:46 +05304290 */
Anish Bhatt1bb60372014-10-14 20:07:22 -07004291#if IS_ENABLED(CONFIG_IPV6)
Li RongQingee9a33b2014-06-20 17:32:36 +08004292static bool cxgb4_netdev(const struct net_device *netdev)
Vipul Pandya01bcca62013-07-04 16:10:46 +05304293{
4294 struct adapter *adap;
4295 int i;
4296
Vipul Pandya01bcca62013-07-04 16:10:46 +05304297 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4298 for (i = 0; i < MAX_NPORTS; i++)
Li RongQingee9a33b2014-06-20 17:32:36 +08004299 if (adap->port[i] == netdev)
4300 return true;
4301 return false;
Vipul Pandya01bcca62013-07-04 16:10:46 +05304302}
4303
4304static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4305 unsigned long event)
4306{
4307 int ret = NOTIFY_DONE;
4308
4309 rcu_read_lock();
4310 if (cxgb4_netdev(event_dev)) {
4311 switch (event) {
4312 case NETDEV_UP:
Joe Perches44835892014-11-06 20:46:14 -08004313 ret = cxgb4_clip_get(event_dev, &ifa->addr);
Vipul Pandya01bcca62013-07-04 16:10:46 +05304314 if (ret < 0) {
4315 rcu_read_unlock();
4316 return ret;
4317 }
4318 ret = NOTIFY_OK;
4319 break;
4320 case NETDEV_DOWN:
Joe Perches44835892014-11-06 20:46:14 -08004321 cxgb4_clip_release(event_dev, &ifa->addr);
Vipul Pandya01bcca62013-07-04 16:10:46 +05304322 ret = NOTIFY_OK;
4323 break;
4324 default:
4325 break;
4326 }
4327 }
4328 rcu_read_unlock();
4329 return ret;
4330}
4331
4332static int cxgb4_inet6addr_handler(struct notifier_block *this,
4333 unsigned long event, void *data)
4334{
4335 struct inet6_ifaddr *ifa = data;
4336 struct net_device *event_dev;
4337 int ret = NOTIFY_DONE;
Vipul Pandya01bcca62013-07-04 16:10:46 +05304338 struct bonding *bond = netdev_priv(ifa->idev->dev);
Veaceslav Falico9caff1e72013-09-25 09:20:14 +02004339 struct list_head *iter;
Vipul Pandya01bcca62013-07-04 16:10:46 +05304340 struct slave *slave;
4341 struct pci_dev *first_pdev = NULL;
4342
4343 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4344 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4345 ret = clip_add(event_dev, ifa, event);
4346 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4347 /* It is possible that two different adapters are bonded in one
4348 * bond. We need to find each such adapter and add the CLIP
4349 * entry to it exactly once.
4350 */
Veaceslav Falico9caff1e72013-09-25 09:20:14 +02004351 bond_for_each_slave(bond, slave, iter) {
Vipul Pandya01bcca62013-07-04 16:10:46 +05304352 if (!first_pdev) {
4353 ret = clip_add(slave->dev, ifa, event);
4354 /* If clip_add is success then only initialize
4355 * first_pdev since it means it is our device
4356 */
4357 if (ret == NOTIFY_OK)
4358 first_pdev = to_pci_dev(
4359 slave->dev->dev.parent);
4360 } else if (first_pdev !=
4361 to_pci_dev(slave->dev->dev.parent))
4362 ret = clip_add(slave->dev, ifa, event);
4363 }
Vipul Pandya01bcca62013-07-04 16:10:46 +05304364 } else
4365 ret = clip_add(ifa->idev->dev, ifa, event);
4366
4367 return ret;
4368}
4369
4370static struct notifier_block cxgb4_inet6addr_notifier = {
4371 .notifier_call = cxgb4_inet6addr_handler
4372};
4373
4374/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4375 * a physical device.
4376 * The physical device reference is needed to send the actual CLIP command.
4377 */
4378static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4379{
4380 struct inet6_dev *idev = NULL;
4381 struct inet6_ifaddr *ifa;
4382 int ret = 0;
4383
4384 idev = __in6_dev_get(root_dev);
4385 if (!idev)
4386 return ret;
4387
4388 read_lock_bh(&idev->lock);
4389 list_for_each_entry(ifa, &idev->addr_list, if_list) {
Joe Perches44835892014-11-06 20:46:14 -08004390 ret = cxgb4_clip_get(dev, &ifa->addr);
Vipul Pandya01bcca62013-07-04 16:10:46 +05304391 if (ret < 0)
4392 break;
4393 }
4394 read_unlock_bh(&idev->lock);
4395
4396 return ret;
4397}
4398
4399static int update_root_dev_clip(struct net_device *dev)
4400{
4401 struct net_device *root_dev = NULL;
4402 int i, ret = 0;
4403
4404 /* First populate the real net device's IPv6 addresses */
4405 ret = update_dev_clip(dev, dev);
4406 if (ret)
4407 return ret;
4408
4409 /* Parse all bond and vlan devices layered on top of the physical dev */
Anish Bhatt587ddfe2014-10-14 20:07:21 -07004410 root_dev = netdev_master_upper_dev_get_rcu(dev);
4411 if (root_dev) {
4412 ret = update_dev_clip(root_dev, dev);
4413 if (ret)
4414 return ret;
4415 }
4416
Vipul Pandya01bcca62013-07-04 16:10:46 +05304417 for (i = 0; i < VLAN_N_VID; i++) {
dingtianhongf06c7f9f2014-05-09 14:58:05 +08004418 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
Vipul Pandya01bcca62013-07-04 16:10:46 +05304419 if (!root_dev)
4420 continue;
4421
4422 ret = update_dev_clip(root_dev, dev);
4423 if (ret)
4424 break;
4425 }
4426 return ret;
4427}
4428
4429static void update_clip(const struct adapter *adap)
4430{
4431 int i;
4432 struct net_device *dev;
4433 int ret;
4434
4435 rcu_read_lock();
4436
4437 for (i = 0; i < MAX_NPORTS; i++) {
4438 dev = adap->port[i];
4439 ret = 0;
4440
4441 if (dev)
4442 ret = update_root_dev_clip(dev);
4443
4444 if (ret < 0)
4445 break;
4446 }
4447 rcu_read_unlock();
4448}
Anish Bhatt1bb60372014-10-14 20:07:22 -07004449#endif /* IS_ENABLED(CONFIG_IPV6) */
Vipul Pandya01bcca62013-07-04 16:10:46 +05304450
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004451/**
4452 * cxgb_up - enable the adapter
4453 * @adap: adapter being enabled
4454 *
4455 * Called when the first port is enabled, this function performs the
4456 * actions necessary to make an adapter operational, such as completing
4457 * the initialization of HW modules, and enabling interrupts.
4458 *
4459 * Must be called with the rtnl lock held.
4460 */
4461static int cxgb_up(struct adapter *adap)
4462{
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00004463 int err;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004464
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00004465 err = setup_sge_queues(adap);
4466 if (err)
4467 goto out;
4468 err = setup_rss(adap);
4469 if (err)
4470 goto freeq;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004471
4472 if (adap->flags & USING_MSIX) {
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00004473 name_msix_vecs(adap);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004474 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4475 adap->msix_info[0].desc, adap);
4476 if (err)
4477 goto irq_err;
4478
4479 err = request_msix_queue_irqs(adap);
4480 if (err) {
4481 free_irq(adap->msix_info[0].vec, adap);
4482 goto irq_err;
4483 }
4484 } else {
4485 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4486 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +00004487 adap->port[0]->name, adap);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004488 if (err)
4489 goto irq_err;
4490 }
4491 enable_rx(adap);
4492 t4_sge_start(adap);
4493 t4_intr_enable(adap);
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00004494 adap->flags |= FULL_INIT_DONE;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004495 notify_ulds(adap, CXGB4_STATE_UP);
Anish Bhatt1bb60372014-10-14 20:07:22 -07004496#if IS_ENABLED(CONFIG_IPV6)
Vipul Pandya01bcca62013-07-04 16:10:46 +05304497 update_clip(adap);
Anish Bhatt1bb60372014-10-14 20:07:22 -07004498#endif
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004499 out:
4500 return err;
4501 irq_err:
4502 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00004503 freeq:
4504 t4_free_sge_resources(adap);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004505 goto out;
4506}
4507
4508static void cxgb_down(struct adapter *adapter)
4509{
4510 t4_intr_disable(adapter);
4511 cancel_work_sync(&adapter->tid_release_task);
Vipul Pandya881806b2012-05-18 15:29:24 +05304512 cancel_work_sync(&adapter->db_full_task);
4513 cancel_work_sync(&adapter->db_drop_task);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004514 adapter->tid_release_task_busy = false;
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004515 adapter->tid_release_head = NULL;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004516
4517 if (adapter->flags & USING_MSIX) {
4518 free_msix_queue_irqs(adapter);
4519 free_irq(adapter->msix_info[0].vec, adapter);
4520 } else
4521 free_irq(adapter->pdev->irq, adapter);
4522 quiesce_rx(adapter);
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00004523 t4_sge_stop(adapter);
4524 t4_free_sge_resources(adapter);
4525 adapter->flags &= ~FULL_INIT_DONE;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004526}
4527
4528/*
4529 * net_device operations
4530 */
4531static int cxgb_open(struct net_device *dev)
4532{
4533 int err;
4534 struct port_info *pi = netdev_priv(dev);
4535 struct adapter *adapter = pi->adapter;
4536
Dimitris Michailidis6a3c8692011-01-19 15:29:05 +00004537 netif_carrier_off(dev);
4538
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00004539 if (!(adapter->flags & FULL_INIT_DONE)) {
4540 err = cxgb_up(adapter);
4541 if (err < 0)
4542 return err;
4543 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004544
Dimitris Michailidisf68707b2010-06-18 10:05:32 +00004545 err = link_start(dev);
4546 if (!err)
4547 netif_tx_start_all_queues(dev);
4548 return err;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004549}
4550
4551static int cxgb_close(struct net_device *dev)
4552{
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004553 struct port_info *pi = netdev_priv(dev);
4554 struct adapter *adapter = pi->adapter;
4555
4556 netif_tx_stop_all_queues(dev);
4557 netif_carrier_off(dev);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004558 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004559}
4560
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00004561/* Return an error number if the indicated filter isn't writable ...
4562 */
4563static int writable_filter(struct filter_entry *f)
4564{
4565 if (f->locked)
4566 return -EPERM;
4567 if (f->pending)
4568 return -EBUSY;
4569
4570 return 0;
4571}
4572
4573/* Delete the filter at the specified index (if valid). This checks for all
4574 * the common problems with doing this, such as the filter being locked or
4575 * currently pending in another operation.
4576 */
4577static int delete_filter(struct adapter *adapter, unsigned int fidx)
4578{
4579 struct filter_entry *f;
4580 int ret;
4581
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004582 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00004583 return -EINVAL;
4584
4585 f = &adapter->tids.ftid_tab[fidx];
4586 ret = writable_filter(f);
4587 if (ret)
4588 return ret;
4589 if (f->valid)
4590 return del_filter_wr(adapter, fidx);
4591
4592 return 0;
4593}
4594
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004595int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
Vipul Pandya793dad92012-12-10 09:30:56 +00004596 __be32 sip, __be16 sport, __be16 vlan,
4597 unsigned int queue, unsigned char port, unsigned char mask)
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004598{
4599 int ret;
4600 struct filter_entry *f;
4601 struct adapter *adap;
4602 int i;
4603 u8 *val;
4604
4605 adap = netdev2adap(dev);
4606
Vipul Pandya1cab7752012-12-10 09:30:55 +00004607 /* Adjust stid to correct filter index */
Kumar Sanghvi470c60c2013-12-18 16:38:21 +05304608 stid -= adap->tids.sftid_base;
Vipul Pandya1cab7752012-12-10 09:30:55 +00004609 stid += adap->tids.nftids;
4610
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004611 /* Check to make sure the filter requested is writable ...
4612 */
4613 f = &adap->tids.ftid_tab[stid];
4614 ret = writable_filter(f);
4615 if (ret)
4616 return ret;
4617
4618 /* Clear out any old resources being used by the filter before
4619 * we start constructing the new filter.
4620 */
4621 if (f->valid)
4622 clear_filter(adap, f);
4623
4624 /* Clear out filter specifications */
4625 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4626 f->fs.val.lport = cpu_to_be16(sport);
4627 f->fs.mask.lport = ~0;
4628 val = (u8 *)&sip;
Vipul Pandya793dad92012-12-10 09:30:56 +00004629 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004630 for (i = 0; i < 4; i++) {
4631 f->fs.val.lip[i] = val[i];
4632 f->fs.mask.lip[i] = ~0;
4633 }
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05304634 if (adap->params.tp.vlan_pri_map & F_PORT) {
Vipul Pandya793dad92012-12-10 09:30:56 +00004635 f->fs.val.iport = port;
4636 f->fs.mask.iport = mask;
4637 }
4638 }
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004639
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05304640 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
Kumar Sanghvi7c89e552013-12-18 16:38:20 +05304641 f->fs.val.proto = IPPROTO_TCP;
4642 f->fs.mask.proto = ~0;
4643 }
4644
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004645 f->fs.dirsteer = 1;
4646 f->fs.iq = queue;
4647 /* Mark filter as locked */
4648 f->locked = 1;
4649 f->fs.rpttid = 1;
4650
4651 ret = set_filter_wr(adap, stid);
4652 if (ret) {
4653 clear_filter(adap, f);
4654 return ret;
4655 }
4656
4657 return 0;
4658}
4659EXPORT_SYMBOL(cxgb4_create_server_filter);
4660
4661int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4662 unsigned int queue, bool ipv6)
4663{
4664 int ret;
4665 struct filter_entry *f;
4666 struct adapter *adap;
4667
4668 adap = netdev2adap(dev);
Vipul Pandya1cab7752012-12-10 09:30:55 +00004669
4670 /* Adjust stid to correct filter index */
Kumar Sanghvi470c60c2013-12-18 16:38:21 +05304671 stid -= adap->tids.sftid_base;
Vipul Pandya1cab7752012-12-10 09:30:55 +00004672 stid += adap->tids.nftids;
4673
Vipul Pandyadca4fae2012-12-10 09:30:53 +00004674 f = &adap->tids.ftid_tab[stid];
4675 /* Unlock the filter */
4676 f->locked = 0;
4677
4678 ret = delete_filter(adap, stid);
4679 if (ret)
4680 return ret;
4681
4682 return 0;
4683}
4684EXPORT_SYMBOL(cxgb4_remove_server_filter);
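
/* Illustrative sketch only (not part of the driver): how an upper-layer
 * module holding a server TID might use the two exported helpers above to
 * steer TCP SYNs for a listener to a chosen ingress queue and later tear
 * the filter down.  The "example_" names and the port/queue values are
 * assumptions for illustration, not driver API.
 */
static inline int example_steer_listener(struct net_device *dev,
					 unsigned int stid, __be32 sip,
					 unsigned int rxq)
{
	/* sport is big-endian; sip == 0 matches any local IP */
	return cxgb4_create_server_filter(dev, stid, sip, htons(80),
					  0, rxq, 0, 0);
}

static inline int example_unsteer_listener(struct net_device *dev,
					   unsigned int stid,
					   unsigned int rxq)
{
	return cxgb4_remove_server_filter(dev, stid, rxq, false);
}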
4685
Dimitris Michailidisf5152c92010-07-07 16:11:25 +00004686static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4687 struct rtnl_link_stats64 *ns)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004688{
4689 struct port_stats stats;
4690 struct port_info *p = netdev_priv(dev);
4691 struct adapter *adapter = p->adapter;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004692
Gavin Shan9fe6cb52014-01-23 12:27:35 +08004693 /* Block retrieving statistics during EEH error
4694 * recovery. Otherwise, the recovery might fail
4695 * and the PCI device will be removed permanently
4696 */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004697 spin_lock(&adapter->stats_lock);
Gavin Shan9fe6cb52014-01-23 12:27:35 +08004698 if (!netif_device_present(dev)) {
4699 spin_unlock(&adapter->stats_lock);
4700 return ns;
4701 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004702 t4_get_port_stats(adapter, p->tx_chan, &stats);
4703 spin_unlock(&adapter->stats_lock);
4704
4705 ns->tx_bytes = stats.tx_octets;
4706 ns->tx_packets = stats.tx_frames;
4707 ns->rx_bytes = stats.rx_octets;
4708 ns->rx_packets = stats.rx_frames;
4709 ns->multicast = stats.rx_mcast_frames;
4710
4711 /* detailed rx_errors */
4712 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4713 stats.rx_runt;
4714 ns->rx_over_errors = 0;
4715 ns->rx_crc_errors = stats.rx_fcs_err;
4716 ns->rx_frame_errors = stats.rx_symbol_err;
4717 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4718 stats.rx_ovflow2 + stats.rx_ovflow3 +
4719 stats.rx_trunc0 + stats.rx_trunc1 +
4720 stats.rx_trunc2 + stats.rx_trunc3;
4721 ns->rx_missed_errors = 0;
4722
4723 /* detailed tx_errors */
4724 ns->tx_aborted_errors = 0;
4725 ns->tx_carrier_errors = 0;
4726 ns->tx_fifo_errors = 0;
4727 ns->tx_heartbeat_errors = 0;
4728 ns->tx_window_errors = 0;
4729
4730 ns->tx_errors = stats.tx_error_frames;
4731 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4732 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4733 return ns;
4734}
4735
4736static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4737{
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004738 unsigned int mbox;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004739 int ret = 0, prtad, devad;
4740 struct port_info *pi = netdev_priv(dev);
4741 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4742
4743 switch (cmd) {
4744 case SIOCGMIIPHY:
4745 if (pi->mdio_addr < 0)
4746 return -EOPNOTSUPP;
4747 data->phy_id = pi->mdio_addr;
4748 break;
4749 case SIOCGMIIREG:
4750 case SIOCSMIIREG:
4751 if (mdio_phy_id_is_c45(data->phy_id)) {
4752 prtad = mdio_phy_id_prtad(data->phy_id);
4753 devad = mdio_phy_id_devad(data->phy_id);
4754 } else if (data->phy_id < 32) {
4755 prtad = data->phy_id;
4756 devad = 0;
4757 data->reg_num &= 0x1f;
4758 } else
4759 return -EINVAL;
4760
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004761 mbox = pi->adapter->fn;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004762 if (cmd == SIOCGMIIREG)
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004763 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004764 data->reg_num, &data->val_out);
4765 else
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004766 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004767 data->reg_num, data->val_in);
4768 break;
4769 default:
4770 return -EOPNOTSUPP;
4771 }
4772 return ret;
4773}
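
/* Note (illustrative): for the clause-45 branch above, user space packs the
 * port and device addresses into phy_id, e.g. with mdio_phy_id_c45(prtad,
 * devad) from <linux/mdio.h>; ids below 32 are treated as plain clause-22
 * PHY addresses.
 */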
4774
4775static void cxgb_set_rxmode(struct net_device *dev)
4776{
4777 /* unfortunately we can't return errors to the stack */
4778 set_rxmode(dev, -1, false);
4779}
4780
4781static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4782{
4783 int ret;
4784 struct port_info *pi = netdev_priv(dev);
4785
4786 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4787 return -EINVAL;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004788 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4789 -1, -1, -1, true);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004790 if (!ret)
4791 dev->mtu = new_mtu;
4792 return ret;
4793}
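
/* Illustrative: this is the path exercised by e.g. "ip link set ethX mtu
 * 9000".  The new MTU is pushed to the VI via t4_set_rxmode() first, so a
 * firmware rejection leaves dev->mtu unchanged.
 */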
4794
4795static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4796{
4797 int ret;
4798 struct sockaddr *addr = p;
4799 struct port_info *pi = netdev_priv(dev);
4800
4801 if (!is_valid_ether_addr(addr->sa_data))
Danny Kukawka504f9b52012-02-21 02:07:49 +00004802 return -EADDRNOTAVAIL;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004803
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004804 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4805 pi->xact_addr_filt, addr->sa_data, true, true);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004806 if (ret < 0)
4807 return ret;
4808
4809 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4810 pi->xact_addr_filt = ret;
4811 return 0;
4812}
4813
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004814#ifdef CONFIG_NET_POLL_CONTROLLER
4815static void cxgb_netpoll(struct net_device *dev)
4816{
4817 struct port_info *pi = netdev_priv(dev);
4818 struct adapter *adap = pi->adapter;
4819
4820 if (adap->flags & USING_MSIX) {
4821 int i;
4822 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4823
4824 for (i = pi->nqsets; i; i--, rx++)
4825 t4_sge_intr_msix(0, &rx->rspq);
4826 } else
4827 t4_intr_handler(adap)(0, adap);
4828}
4829#endif
4830
4831static const struct net_device_ops cxgb4_netdev_ops = {
4832 .ndo_open = cxgb_open,
4833 .ndo_stop = cxgb_close,
4834 .ndo_start_xmit = t4_eth_xmit,
Anish Bhatt688848b2014-06-19 21:37:13 -07004835 .ndo_select_queue = cxgb_select_queue,
Dimitris Michailidis9be793b2010-06-18 10:05:31 +00004836 .ndo_get_stats64 = cxgb_get_stats,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004837 .ndo_set_rx_mode = cxgb_set_rxmode,
4838 .ndo_set_mac_address = cxgb_set_mac_addr,
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00004839 .ndo_set_features = cxgb_set_features,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004840 .ndo_validate_addr = eth_validate_addr,
4841 .ndo_do_ioctl = cxgb_ioctl,
4842 .ndo_change_mtu = cxgb_change_mtu,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004843#ifdef CONFIG_NET_POLL_CONTROLLER
4844 .ndo_poll_controller = cxgb_netpoll,
4845#endif
4846};
4847
4848void t4_fatal_err(struct adapter *adap)
4849{
4850 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4851 t4_intr_disable(adap);
4852 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4853}
4854
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304855/* Return the specified PCI-E Configuration Space register from our Physical
4856 * Function. We try first via a Firmware LDST Command since we prefer to let
4857 * the firmware own all of these registers, but if that fails we go for it
4858 * directly ourselves.
4859 */
4860static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
4861{
4862 struct fw_ldst_cmd ldst_cmd;
4863 u32 val;
4864 int ret;
4865
4866 /* Construct and send the Firmware LDST Command to retrieve the
4867 * specified PCI-E Configuration Space register.
4868 */
4869 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
4870 ldst_cmd.op_to_addrspace =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05304871 htonl(FW_CMD_OP_V(FW_LDST_CMD) |
4872 FW_CMD_REQUEST_F |
4873 FW_CMD_READ_F |
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304874 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
4875 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
4876 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
4877 ldst_cmd.u.pcie.ctrl_to_fn =
4878 (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
4879 ldst_cmd.u.pcie.r = reg;
4880 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
4881 &ldst_cmd);
4882
 4883 /* If the LDST Command succeeded, extract the returned register
 4884 * value. Otherwise read it directly ourselves.
4885 */
4886 if (ret == 0)
4887 val = ntohl(ldst_cmd.u.pcie.data[0]);
4888 else
4889 t4_hw_pci_read_cfg4(adap, reg, &val);
4890
4891 return val;
4892}
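
/* Hypothetical usage sketch (illustration only, assumed helper name):
 * fetching the low 32 bits of another BAR the same way setup_memwin_rdma()
 * below fetches BAR2.
 */
static inline u32 example_read_bar2_lo(struct adapter *adap)
{
	return t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2) &
	       PCI_BASE_ADDRESS_MEM_MASK;
}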
4893
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004894static void setup_memwin(struct adapter *adap)
4895{
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304896 u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004897
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05304898 if (is_t4(adap->params.chip)) {
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304899 u32 bar0;
4900
4901 /* Truncation intentional: we only read the bottom 32-bits of
4902 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
4903 * mechanism to read BAR0 instead of using
4904 * pci_resource_start() because we could be operating from
4905 * within a Virtual Machine which is trapping our accesses to
4906 * our Configuration Space and we need to set up the PCI-E
4907 * Memory Window decoders with the actual addresses which will
4908 * be coming across the PCI-E link.
4909 */
4910 bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
4911 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
4912 adap->t4_bar0 = bar0;
4913
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00004914 mem_win0_base = bar0 + MEMWIN0_BASE;
4915 mem_win1_base = bar0 + MEMWIN1_BASE;
4916 mem_win2_base = bar0 + MEMWIN2_BASE;
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304917 mem_win2_aperture = MEMWIN2_APERTURE;
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00004918 } else {
4919 /* For T5, only relative offset inside the PCIe BAR is passed */
4920 mem_win0_base = MEMWIN0_BASE;
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304921 mem_win1_base = MEMWIN1_BASE;
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00004922 mem_win2_base = MEMWIN2_BASE_T5;
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304923 mem_win2_aperture = MEMWIN2_APERTURE_T5;
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00004924 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004925 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00004926 mem_win0_base | BIR(0) |
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004927 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4928 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00004929 mem_win1_base | BIR(0) |
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004930 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4931 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
Santosh Rastapur19dd37b2013-03-14 05:08:53 +00004932 mem_win2_base | BIR(0) |
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304933 WINDOW(ilog2(mem_win2_aperture) - 10));
4934 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
Vipul Pandya636f9d32012-09-26 02:39:39 +00004935}
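
/* Reference sketch (assumed helper, illustration only): the WINDOW field
 * programmed above encodes an aperture as ilog2(size) - 10, so e.g. a
 * 64KB aperture encodes as ilog2(65536) - 10 == 6.
 */
static inline u32 example_memwin_window_field(unsigned int aperture)
{
	return WINDOW(ilog2(aperture) - 10);
}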
4936
4937static void setup_memwin_rdma(struct adapter *adap)
4938{
Dimitris Michailidis1ae970e2010-08-02 13:19:19 +00004939 if (adap->vres.ocq.size) {
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304940 u32 start;
4941 unsigned int sz_kb;
Dimitris Michailidis1ae970e2010-08-02 13:19:19 +00004942
Hariprasad Shenai0abfd152014-06-27 19:23:48 +05304943 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
4944 start &= PCI_BASE_ADDRESS_MEM_MASK;
4945 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
Dimitris Michailidis1ae970e2010-08-02 13:19:19 +00004946 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4947 t4_write_reg(adap,
4948 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4949 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4950 t4_write_reg(adap,
4951 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4952 adap->vres.ocq.start);
4953 t4_read_reg(adap,
4954 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4955 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004956}
4957
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00004958static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4959{
4960 u32 v;
4961 int ret;
4962
4963 /* get device capabilities */
4964 memset(c, 0, sizeof(*c));
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05304965 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4966 FW_CMD_REQUEST_F | FW_CMD_READ_F);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05304967 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004968 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00004969 if (ret < 0)
4970 return ret;
4971
4972 /* select capabilities we'll be using */
4973 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4974 if (!vf_acls)
4975 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4976 else
4977 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4978 } else if (vf_acls) {
4979 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4980 return ret;
4981 }
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05304982 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4983 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004984 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00004985 if (ret < 0)
4986 return ret;
4987
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004988 ret = t4_config_glbl_rss(adap, adap->fn,
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00004989 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4990 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4991 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4992 if (ret < 0)
4993 return ret;
4994
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004995 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4996 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00004997 if (ret < 0)
4998 return ret;
4999
5000 t4_sge_init(adap);
5001
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00005002 /* tweak some settings */
5003 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
5004 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
5005 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
5006 v = t4_read_reg(adap, TP_PIO_DATA);
5007 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00005008
Vipul Pandyadca4fae2012-12-10 09:30:53 +00005009 /* first 4 Tx modulation queues point to consecutive Tx channels */
5010 adap->params.tp.tx_modq_map = 0xE4;
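	/* 0xE4 == 0b11100100: four 2-bit fields, assigning modulation
	 * queues 0..3 to channels 0..3 respectively (an identity map).
	 */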
5011 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
5012 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
5013
5014 /* associate each Tx modulation queue with consecutive Tx channels */
5015 v = 0x84218421;
5016 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5017 &v, 1, A_TP_TX_SCHED_HDR);
5018 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5019 &v, 1, A_TP_TX_SCHED_FIFO);
5020 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5021 &v, 1, A_TP_TX_SCHED_PCMD);
5022
5023#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
5024 if (is_offload(adap)) {
5025 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
5026 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5027 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5028 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5029 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5030 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
5031 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5032 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5033 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5034 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5035 }
5036
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00005037 /* get basic stuff going */
5038 return t4_early_init(adap, adap->fn);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00005039}
5040
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005041/*
5042 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
5043 */
5044#define MAX_ATIDS 8192U
5045
5046/*
5047 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005048 *
5049 * If the firmware we're dealing with has Configuration File support, then
5050 * we use that to perform all configuration
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005051 */
Vipul Pandya636f9d32012-09-26 02:39:39 +00005052
5053/*
5054 * Tweak configuration based on module parameters, etc. Most of these have
5055 * defaults assigned to them by Firmware Configuration Files (if we're using
5056 * them) but need to be explicitly set if we're using hard-coded
5057 * initialization. But even in the case of using Firmware Configuration
5058 * Files, we'd like to expose the ability to change these via module
5059 * parameters so these are essentially common tweaks/settings for
5060 * Configuration Files and hard-coded initialization ...
5061 */
5062static int adap_init0_tweaks(struct adapter *adapter)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005063{
Vipul Pandya636f9d32012-09-26 02:39:39 +00005064 /*
5065 * Fix up various Host-Dependent Parameters like Page Size, Cache
5066 * Line Size, etc. The firmware default is for a 4KB Page Size and
5067 * 64B Cache Line Size ...
5068 */
5069 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005070
Vipul Pandya636f9d32012-09-26 02:39:39 +00005071 /*
5072 * Process module parameters which affect early initialization.
5073 */
5074 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
5075 dev_err(&adapter->pdev->dev,
5076 "Ignoring illegal rx_dma_offset=%d, using 2\n",
5077 rx_dma_offset);
5078 rx_dma_offset = 2;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005079 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005080 t4_set_reg_field(adapter, SGE_CONTROL,
5081 PKTSHIFT_MASK,
5082 PKTSHIFT(rx_dma_offset));
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005083
Vipul Pandya636f9d32012-09-26 02:39:39 +00005084 /*
5085 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
5086 * adds the pseudo header itself.
5087 */
5088 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
5089 CSUM_HAS_PSEUDO_HDR, 0);
5090
5091 return 0;
5092}
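
/* Usage note (illustrative): the PKTSHIFT tweak above is driven by the
 * rx_dma_offset module parameter, e.g. "modprobe cxgb4 rx_dma_offset=0";
 * any value other than 0 or 2 is rejected and falls back to 2 as above.
 */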
5093
5094/*
5095 * Attempt to initialize the adapter via a Firmware Configuration File.
5096 */
5097static int adap_init0_config(struct adapter *adapter, int reset)
5098{
5099 struct fw_caps_config_cmd caps_cmd;
5100 const struct firmware *cf;
5101 unsigned long mtype = 0, maddr = 0;
5102 u32 finiver, finicsum, cfcsum;
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305103 int ret;
5104 int config_issued = 0;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00005105 char *fw_config_file, fw_config_file_path[256];
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305106 char *config_name = NULL;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005107
5108 /*
5109 * Reset device if necessary.
5110 */
5111 if (reset) {
5112 ret = t4_fw_reset(adapter, adapter->mbox,
5113 PIORSTMODE | PIORST);
5114 if (ret < 0)
5115 goto bye;
5116 }
5117
5118 /*
5119 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
5120 * then use that. Otherwise, use the configuration file stored
5121 * in the adapter flash ...
5122 */
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05305123 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00005124 case CHELSIO_T4:
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305125 fw_config_file = FW4_CFNAME;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00005126 break;
5127 case CHELSIO_T5:
5128 fw_config_file = FW5_CFNAME;
5129 break;
5130 default:
5131 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
5132 adapter->pdev->device);
5133 ret = -EINVAL;
5134 goto bye;
5135 }
5136
5137 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005138 if (ret < 0) {
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305139 config_name = "On FLASH";
Vipul Pandya636f9d32012-09-26 02:39:39 +00005140 mtype = FW_MEMTYPE_CF_FLASH;
5141 maddr = t4_flash_cfg_addr(adapter);
5142 } else {
5143 u32 params[7], val[7];
5144
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305145 sprintf(fw_config_file_path,
5146 "/lib/firmware/%s", fw_config_file);
5147 config_name = fw_config_file_path;
5148
Vipul Pandya636f9d32012-09-26 02:39:39 +00005149 if (cf->size >= FLASH_CFG_MAX_SIZE)
5150 ret = -ENOMEM;
5151 else {
5152 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5153 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5154 ret = t4_query_params(adapter, adapter->mbox,
5155 adapter->fn, 0, 1, params, val);
5156 if (ret == 0) {
5157 /*
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05305158 * For t4_memory_rw() below, addresses and
Vipul Pandya636f9d32012-09-26 02:39:39 +00005159 * sizes have to be in terms of multiples of 4
5160 * bytes. So, if the Configuration File isn't
5161 * a multiple of 4 bytes in length we'll have
5162 * to write that out separately since we can't
5163 * guarantee that the bytes following the
5164 * residual byte in the buffer returned by
5165 * request_firmware() are zeroed out ...
5166 */
5167 size_t resid = cf->size & 0x3;
5168 size_t size = cf->size & ~0x3;
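				/* e.g. a 1030-byte Configuration File
				 * gives size == 1028 and resid == 2; the
				 * final two bytes go out below in one
				 * zero-padded 4-byte word.
				 */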
5169 __be32 *data = (__be32 *)cf->data;
5170
5171 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
5172 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
5173
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05305174 spin_lock(&adapter->win0_lock);
5175 ret = t4_memory_rw(adapter, 0, mtype, maddr,
5176 size, data, T4_MEMORY_WRITE);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005177 if (ret == 0 && resid != 0) {
5178 union {
5179 __be32 word;
5180 char buf[4];
5181 } last;
5182 int i;
5183
5184 last.word = data[size >> 2];
5185 for (i = resid; i < 4; i++)
5186 last.buf[i] = 0;
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05305187 ret = t4_memory_rw(adapter, 0, mtype,
5188 maddr + size,
5189 4, &last.word,
5190 T4_MEMORY_WRITE);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005191 }
Hariprasad Shenaifc5ab022014-06-27 19:23:49 +05305192 spin_unlock(&adapter->win0_lock);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005193 }
5194 }
5195
5196 release_firmware(cf);
5197 if (ret)
5198 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005199 }
5200
Vipul Pandya636f9d32012-09-26 02:39:39 +00005201 /*
5202 * Issue a Capability Configuration command to the firmware to get it
5203 * to parse the Configuration File. We don't use t4_fw_config_file()
5204 * because we want the ability to modify various features after we've
5205 * processed the configuration file ...
5206 */
5207 memset(&caps_cmd, 0, sizeof(caps_cmd));
5208 caps_cmd.op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05305209 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5210 FW_CMD_REQUEST_F |
5211 FW_CMD_READ_F);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05305212 caps_cmd.cfvalid_to_len16 =
Vipul Pandya636f9d32012-09-26 02:39:39 +00005213 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
5214 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
5215 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
5216 FW_LEN16(caps_cmd));
5217 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5218 &caps_cmd);
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305219
5220 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
5221 * Configuration File in FLASH), our last gasp effort is to use the
5222 * Firmware Configuration File which is embedded in the firmware. A
5223 * very few early versions of the firmware didn't have one embedded
5224 * but we can ignore those.
5225 */
5226 if (ret == -ENOENT) {
5227 memset(&caps_cmd, 0, sizeof(caps_cmd));
5228 caps_cmd.op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05305229 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5230 FW_CMD_REQUEST_F |
5231 FW_CMD_READ_F);
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305232 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5233 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
5234 sizeof(caps_cmd), &caps_cmd);
5235 config_name = "Firmware Default";
5236 }
5237
5238 config_issued = 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005239 if (ret < 0)
5240 goto bye;
5241
Vipul Pandya636f9d32012-09-26 02:39:39 +00005242 finiver = ntohl(caps_cmd.finiver);
5243 finicsum = ntohl(caps_cmd.finicsum);
5244 cfcsum = ntohl(caps_cmd.cfcsum);
5245 if (finicsum != cfcsum)
5246 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
5247 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
5248 finicsum, cfcsum);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005249
Vipul Pandya636f9d32012-09-26 02:39:39 +00005250 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00005251 * And now tell the firmware to use the configuration we just loaded.
5252 */
5253 caps_cmd.op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05305254 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5255 FW_CMD_REQUEST_F |
5256 FW_CMD_WRITE_F);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05305257 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
Vipul Pandya636f9d32012-09-26 02:39:39 +00005258 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5259 NULL);
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00005260 if (ret < 0)
5261 goto bye;
5262
Vipul Pandya636f9d32012-09-26 02:39:39 +00005263 /*
5264 * Tweak configuration based on system architecture, module
5265 * parameters, etc.
5266 */
5267 ret = adap_init0_tweaks(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005268 if (ret < 0)
5269 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005270
Vipul Pandya636f9d32012-09-26 02:39:39 +00005271 /*
5272 * And finally tell the firmware to initialize itself using the
5273 * parameters from the Configuration File.
5274 */
5275 ret = t4_fw_initialize(adapter, adapter->mbox);
5276 if (ret < 0)
5277 goto bye;
5278
5279 /*
5280 * Return successfully and note that we're operating with parameters
5281 * not supplied by the driver, rather than from hard-wired
 5282 * initialization constants buried in the driver.
5283 */
5284 adapter->flags |= USING_SOFT_PARAMS;
5285 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305286 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5287 config_name, finiver, cfcsum);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005288 return 0;
5289
5290 /*
5291 * Something bad happened. Return the error ... (If the "error"
5292 * is that there's no Configuration File on the adapter we don't
5293 * want to issue a warning since this is fairly common.)
5294 */
5295bye:
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305296 if (config_issued && ret != -ENOENT)
5297 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5298 config_name, -ret);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005299 return ret;
5300}
5301
5302/*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005303 * Attempt to initialize the adapter via hard-coded, driver supplied
5304 * parameters ...
5305 */
5306static int adap_init0_no_config(struct adapter *adapter, int reset)
5307{
5308 struct sge *s = &adapter->sge;
5309 struct fw_caps_config_cmd caps_cmd;
5310 u32 v;
5311 int i, ret;
5312
5313 /*
5314 * Reset device if necessary
5315 */
5316 if (reset) {
5317 ret = t4_fw_reset(adapter, adapter->mbox,
5318 PIORSTMODE | PIORST);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005319 if (ret < 0)
5320 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005321 }
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00005322
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005323 /*
5324 * Get device capabilities and select which we'll be using.
5325 */
5326 memset(&caps_cmd, 0, sizeof(caps_cmd));
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05305327 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5328 FW_CMD_REQUEST_F | FW_CMD_READ_F);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05305329 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005330 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5331 &caps_cmd);
5332 if (ret < 0)
5333 goto bye;
5334
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005335 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5336 if (!vf_acls)
5337 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5338 else
5339 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5340 } else if (vf_acls) {
5341 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
5342 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005343 }
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05305344 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5345 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005346 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5347 NULL);
5348 if (ret < 0)
5349 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005350
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005351 /*
5352 * Tweak configuration based on system architecture, module
5353 * parameters, etc.
5354 */
5355 ret = adap_init0_tweaks(adapter);
5356 if (ret < 0)
5357 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005358
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005359 /*
5360 * Select RSS Global Mode we want to use. We use "Basic Virtual"
5361 * mode which maps each Virtual Interface to its own section of
5362 * the RSS Table and we turn on all map and hash enables ...
5363 */
5364 adapter->flags |= RSS_TNLALLLOOKUP;
5365 ret = t4_config_glbl_rss(adapter, adapter->mbox,
5366 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5367 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5368 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
5369 ((adapter->flags & RSS_TNLALLLOOKUP) ?
5370 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
5371 if (ret < 0)
5372 goto bye;
5373
5374 /*
5375 * Set up our own fundamental resource provisioning ...
5376 */
5377 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
5378 PFRES_NEQ, PFRES_NETHCTRL,
5379 PFRES_NIQFLINT, PFRES_NIQ,
5380 PFRES_TC, PFRES_NVI,
5381 FW_PFVF_CMD_CMASK_MASK,
5382 pfvfres_pmask(adapter, adapter->fn, 0),
5383 PFRES_NEXACTF,
5384 PFRES_R_CAPS, PFRES_WX_CAPS);
5385 if (ret < 0)
5386 goto bye;
5387
5388 /*
5389 * Perform low level SGE initialization. We need to do this before we
5390 * send the firmware the INITIALIZE command because that will cause
5391 * any other PF Drivers which are waiting for the Master
5392 * Initialization to proceed forward.
5393 */
5394 for (i = 0; i < SGE_NTIMERS - 1; i++)
5395 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
5396 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
5397 s->counter_val[0] = 1;
5398 for (i = 1; i < SGE_NCOUNTERS; i++)
5399 s->counter_val[i] = min(intr_cnt[i - 1],
5400 THRESHOLD_0_GET(THRESHOLD_0_MASK));
5401 t4_sge_init(adapter);
Casey Leedom7ee9ff92010-06-25 12:11:46 +00005402
5403#ifdef CONFIG_PCI_IOV
5404 /*
5405 * Provision resource limits for Virtual Functions. We currently
5406 * grant them all the same static resource limits except for the Port
5407 * Access Rights Mask which we're assigning based on the PF. All of
5408 * the static provisioning stuff for both the PF and VF really needs
5409 * to be managed in a persistent manner for each device which the
5410 * firmware controls.
5411 */
5412 {
5413 int pf, vf;
5414
Santosh Rastapur7d6727c2013-03-14 05:08:56 +00005415 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
Casey Leedom7ee9ff92010-06-25 12:11:46 +00005416 if (num_vf[pf] <= 0)
5417 continue;
5418
5419 /* VF numbering starts at 1! */
5420 for (vf = 1; vf <= num_vf[pf]; vf++) {
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005421 ret = t4_cfg_pfvf(adapter, adapter->mbox,
5422 pf, vf,
Casey Leedom7ee9ff92010-06-25 12:11:46 +00005423 VFRES_NEQ, VFRES_NETHCTRL,
5424 VFRES_NIQFLINT, VFRES_NIQ,
5425 VFRES_TC, VFRES_NVI,
Vipul Pandya1f1e4952013-01-09 07:42:49 +00005426 FW_PFVF_CMD_CMASK_MASK,
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005427 pfvfres_pmask(
5428 adapter, pf, vf),
Casey Leedom7ee9ff92010-06-25 12:11:46 +00005429 VFRES_NEXACTF,
5430 VFRES_R_CAPS, VFRES_WX_CAPS);
5431 if (ret < 0)
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005432 dev_warn(adapter->pdev_dev,
5433 "failed to "\
Casey Leedom7ee9ff92010-06-25 12:11:46 +00005434 "provision pf/vf=%d/%d; "
5435 "err=%d\n", pf, vf, ret);
5436 }
5437 }
5438 }
5439#endif
5440
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005441 /*
5442 * Set up the default filter mode. Later we'll want to implement this
5443 * via a firmware command, etc. ... This needs to be done before the
 5444 * firmware initialization command ... If the selected set of fields
5445 * isn't equal to the default value, we'll need to make sure that the
5446 * field selections will fit in the 36-bit budget.
5447 */
5448 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
Vipul Pandya404d9e32012-10-08 02:59:43 +00005449 int j, bits = 0;
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005450
Vipul Pandya404d9e32012-10-08 02:59:43 +00005451 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5452 switch (tp_vlan_pri_map & (1 << j)) {
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005453 case 0:
5454 /* compressed filter field not enabled */
5455 break;
5456 case FCOE_MASK:
5457 bits += 1;
5458 break;
5459 case PORT_MASK:
5460 bits += 3;
5461 break;
5462 case VNIC_ID_MASK:
5463 bits += 17;
5464 break;
5465 case VLAN_MASK:
5466 bits += 17;
5467 break;
5468 case TOS_MASK:
5469 bits += 8;
5470 break;
5471 case PROTOCOL_MASK:
5472 bits += 8;
5473 break;
5474 case ETHERTYPE_MASK:
5475 bits += 16;
5476 break;
5477 case MACMATCH_MASK:
5478 bits += 9;
5479 break;
5480 case MPSHITTYPE_MASK:
5481 bits += 3;
5482 break;
5483 case FRAGMENTATION_MASK:
5484 bits += 1;
5485 break;
5486 }
5487
5488 if (bits > 36) {
5489 dev_err(adapter->pdev_dev,
5490 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5491 " using %#x\n", tp_vlan_pri_map, bits,
5492 TP_VLAN_PRI_MAP_DEFAULT);
5493 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5494 }
5495 }
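	/* Worked example (illustrative): enabling PORT (3) + VNIC_ID (17) +
	 * ETHERTYPE (16) sums to exactly 36 bits and just fits, while adding
	 * FCOE (1) on top would need 37 and be rejected above.
	 */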
5496 v = tp_vlan_pri_map;
5497 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5498 &v, 1, TP_VLAN_PRI_MAP);
5499
5500 /*
 5501 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
5502 * to support any of the compressed filter fields above. Newer
5503 * versions of the firmware do this automatically but it doesn't hurt
5504 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5505 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5506 * since the firmware automatically turns this on and off when we have
5507 * a non-zero number of filters active (since it does have a
5508 * performance impact).
5509 */
5510 if (tp_vlan_pri_map)
5511 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5512 FIVETUPLELOOKUP_MASK,
5513 FIVETUPLELOOKUP_MASK);
5514
5515 /*
5516 * Tweak some settings.
5517 */
5518 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5519 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5520 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5521 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5522
5523 /*
5524 * Get basic stuff going by issuing the Firmware Initialize command.
5525 * Note that this _must_ be after all PFVF commands ...
5526 */
5527 ret = t4_fw_initialize(adapter, adapter->mbox);
5528 if (ret < 0)
5529 goto bye;
5530
5531 /*
5532 * Return successfully!
5533 */
5534 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5535 "driver parameters\n");
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005536 return 0;
5537
5538 /*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005539 * Something bad happened. Return the error ...
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005540 */
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005541bye:
5542 return ret;
5543}
5544
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305545static struct fw_info fw_info_array[] = {
5546 {
5547 .chip = CHELSIO_T4,
5548 .fs_name = FW4_CFNAME,
5549 .fw_mod_name = FW4_FNAME,
5550 .fw_hdr = {
5551 .chip = FW_HDR_CHIP_T4,
5552 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5553 .intfver_nic = FW_INTFVER(T4, NIC),
5554 .intfver_vnic = FW_INTFVER(T4, VNIC),
5555 .intfver_ri = FW_INTFVER(T4, RI),
5556 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5557 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5558 },
5559 }, {
5560 .chip = CHELSIO_T5,
5561 .fs_name = FW5_CFNAME,
5562 .fw_mod_name = FW5_FNAME,
5563 .fw_hdr = {
5564 .chip = FW_HDR_CHIP_T5,
5565 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5566 .intfver_nic = FW_INTFVER(T5, NIC),
5567 .intfver_vnic = FW_INTFVER(T5, VNIC),
5568 .intfver_ri = FW_INTFVER(T5, RI),
5569 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5570 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5571 },
5572 }
5573};
5574
5575static struct fw_info *find_fw_info(int chip)
5576{
5577 int i;
5578
5579 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5580 if (fw_info_array[i].chip == chip)
5581 return &fw_info_array[i];
5582 }
5583 return NULL;
5584}
5585
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005586/*
Vipul Pandya636f9d32012-09-26 02:39:39 +00005587 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005588 */
5589static int adap_init0(struct adapter *adap)
5590{
5591 int ret;
5592 u32 v, port_vec;
5593 enum dev_state state;
5594 u32 params[7], val[7];
Vipul Pandya9a4da2c2012-10-19 02:09:53 +00005595 struct fw_caps_config_cmd caps_cmd;
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05305596 int reset = 1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005597
Vipul Pandya636f9d32012-09-26 02:39:39 +00005598 /*
5599 * Contact FW, advertising Master capability (and potentially forcing
5600 * ourselves as the Master PF if our module parameter force_init is
5601 * set).
5602 */
5603 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5604 force_init ? MASTER_MUST : MASTER_MAY,
5605 &state);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005606 if (ret < 0) {
5607 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5608 ret);
5609 return ret;
5610 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005611 if (ret == adap->mbox)
5612 adap->flags |= MASTER_PF;
5613 if (force_init && state == DEV_STATE_INIT)
5614 state = DEV_STATE_UNINIT;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005615
Vipul Pandya636f9d32012-09-26 02:39:39 +00005616 /*
5617 * If we're the Master PF Driver and the device is uninitialized,
5618 * then let's consider upgrading the firmware ... (We always want
5619 * to check the firmware version number in order to A. get it for
5620 * later reporting and B. to warn if the currently loaded firmware
5621 * is excessively mismatched relative to the driver.)
5622 */
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305623 t4_get_fw_version(adap, &adap->params.fw_vers);
5624 t4_get_tp_version(adap, &adap->params.tp_vers);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005625 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305626 struct fw_info *fw_info;
5627 struct fw_hdr *card_fw;
5628 const struct firmware *fw;
5629 const u8 *fw_data = NULL;
5630 unsigned int fw_size = 0;
5631
5632 /* This is the firmware whose headers the driver was compiled
5633 * against
5634 */
5635 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5636 if (fw_info == NULL) {
5637 dev_err(adap->pdev_dev,
5638 "unable to get firmware info for chip %d.\n",
5639 CHELSIO_CHIP_VERSION(adap->params.chip));
5640 return -EINVAL;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005641 }
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305642
5643 /* allocate memory to read the header of the firmware on the
5644 * card
5645 */
5646 card_fw = t4_alloc_mem(sizeof(*card_fw));
5647
 5648 /* Get FW from /lib/firmware/ */
5649 ret = request_firmware(&fw, fw_info->fw_mod_name,
5650 adap->pdev_dev);
5651 if (ret < 0) {
5652 dev_err(adap->pdev_dev,
5653 "unable to load firmware image %s, error %d\n",
5654 fw_info->fw_mod_name, ret);
5655 } else {
5656 fw_data = fw->data;
5657 fw_size = fw->size;
5658 }
5659
5660 /* upgrade FW logic */
5661 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5662 state, &reset);
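		/* t4_prep_fw() is expected to compare the on-card header
		 * (card_fw) against the version the driver was compiled
		 * with (fw_info) and any image loaded from /lib/firmware,
		 * flashing an upgrade when one is warranted.
		 */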
5663
5664 /* Cleaning up */
5665 if (fw != NULL)
5666 release_firmware(fw);
5667 t4_free_mem(card_fw);
5668
Vipul Pandya636f9d32012-09-26 02:39:39 +00005669 if (ret < 0)
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305670 goto bye;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005671 }
5672
5673 /*
5674 * Grab VPD parameters. This should be done after we establish a
5675 * connection to the firmware since some of the VPD parameters
5676 * (notably the Core Clock frequency) are retrieved via requests to
5677 * the firmware. On the other hand, we need these fairly early on
5678 * so we do this right after getting ahold of the firmware.
5679 */
5680 ret = get_vpd_params(adap, &adap->params.vpd);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005681 if (ret < 0)
5682 goto bye;
5683
Vipul Pandya636f9d32012-09-26 02:39:39 +00005684 /*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005685 * Find out what ports are available to us. Note that we need to do
5686 * this before calling adap_init0_no_config() since it needs nports
5687 * and portvec ...
Vipul Pandya636f9d32012-09-26 02:39:39 +00005688 */
5689 v =
5690 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5691 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5692 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5693 if (ret < 0)
5694 goto bye;
5695
5696 adap->params.nports = hweight32(port_vec);
5697 adap->params.portvec = port_vec;
5698
5699 /*
5700 * If the firmware is initialized already (and we're not forcing a
5701 * master initialization), note that we're living with existing
5702 * adapter parameters. Otherwise, it's time to try initializing the
5703 * adapter ...
5704 */
5705 if (state == DEV_STATE_INIT) {
5706 dev_info(adap->pdev_dev, "Coming up as %s: "\
5707 "Adapter already initialized\n",
5708 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5709 adap->flags |= USING_SOFT_PARAMS;
5710 } else {
5711 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5712 "Initializing adapter\n");
Vipul Pandya636f9d32012-09-26 02:39:39 +00005713 /*
5714 * If the firmware doesn't support Configuration
 5715 * Files, warn the user.
5716 */
5717 if (ret < 0)
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005718 dev_warn(adap->pdev_dev, "Firmware doesn't support "
Vipul Pandya636f9d32012-09-26 02:39:39 +00005719 "configuration file.\n");
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005720 if (force_old_init)
5721 ret = adap_init0_no_config(adap, reset);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005722 else {
5723 /*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005724 * Find out whether we're dealing with a version of
5725 * the firmware which has configuration file support.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005726 */
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005727 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5728 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5729 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5730 params, val);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005731
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005732 /*
5733 * If the firmware doesn't support Configuration
5734 * Files, use the old Driver-based, hard-wired
5735 * initialization. Otherwise, try using the
5736 * Configuration File support and fall back to the
5737 * Driver-based initialization if there's no
5738 * Configuration File found.
5739 */
5740 if (ret < 0)
5741 ret = adap_init0_no_config(adap, reset);
5742 else {
5743 /*
5744 * The firmware provides us with a memory
5745 * buffer where we can load a Configuration
5746 * File from the host if we want to override
5747 * the Configuration File in flash.
5748 */
5749
5750 ret = adap_init0_config(adap, reset);
5751 if (ret == -ENOENT) {
5752 dev_info(adap->pdev_dev,
5753 "No Configuration File present "
Hariprasad Shenai16e47622013-12-03 17:05:58 +05305754 "on adapter. Using hard-wired "
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005755 "configuration parameters.\n");
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005757 ret = adap_init0_no_config(adap, reset);
5758 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005759 }
5760 }
5761 if (ret < 0) {
5762 dev_err(adap->pdev_dev,
5763 "could not initialize adapter, error %d\n",
5764 -ret);
5765 goto bye;
5766 }
5767 }
5768
5769 /*
5770 * If we're living with non-hard-coded parameters (either from a
5771 * Firmware Configuration File or values programmed by a different PF
5772 * Driver), give the SGE code a chance to pull in anything that it
5773 * needs ... Note that this must be called after we retrieve our VPD
5774 * parameters in order to know how to convert core ticks to seconds.
5775 */
5776 if (adap->flags & USING_SOFT_PARAMS) {
5777 ret = t4_sge_init(adap);
5778 if (ret < 0)
5779 goto bye;
5780 }
5781
Vipul Pandya9a4da2c2012-10-19 02:09:53 +00005782 if (is_bypass_device(adap->pdev->device))
5783 adap->params.bypass = 1;
5784
Vipul Pandya636f9d32012-09-26 02:39:39 +00005785 /*
5786 * Grab some of our basic fundamental operating parameters.
5787 */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005788#define FW_PARAM_DEV(param) \
5789 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
Vipul Pandya636f9d32012-09-26 02:39:39 +00005790 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005791
5792#define FW_PARAM_PFVF(param) \
Vipul Pandya636f9d32012-09-26 02:39:39 +00005793 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5794 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
5795 FW_PARAMS_PARAM_Y(0) | \
5796 FW_PARAMS_PARAM_Z(0)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005797
Vipul Pandya636f9d32012-09-26 02:39:39 +00005798 params[0] = FW_PARAM_PFVF(EQ_START);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005799 params[1] = FW_PARAM_PFVF(L2T_START);
5800 params[2] = FW_PARAM_PFVF(L2T_END);
5801 params[3] = FW_PARAM_PFVF(FILTER_START);
5802 params[4] = FW_PARAM_PFVF(FILTER_END);
5803 params[5] = FW_PARAM_PFVF(IQFLINT_START);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005804 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005805 if (ret < 0)
5806 goto bye;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005807 adap->sge.egr_start = val[0];
5808 adap->l2t_start = val[1];
5809 adap->l2t_end = val[2];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005810 adap->tids.ftid_base = val[3];
5811 adap->tids.nftids = val[4] - val[3] + 1;
5812 adap->sge.ingr_start = val[5];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005813
Vipul Pandya636f9d32012-09-26 02:39:39 +00005814 /* query params related to active filter region */
5815 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5816 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5817 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
 5818 /* If an Active Filter region is provisioned, we enable establishing
 5819 * offload connections through firmware work requests
5820 */
5821 if ((val[0] != val[1]) && (ret >= 0)) {
5822 adap->flags |= FW_OFLD_CONN;
5823 adap->tids.aftid_base = val[0];
5824 adap->tids.aftid_end = val[1];
5825 }
5826
Vipul Pandyab407a4a2013-04-29 04:04:40 +00005827 /* If we're running on newer firmware, let it know that we're
5828 * prepared to deal with encapsulated CPL messages. Older
5829 * firmware won't understand this and we'll just get
5830 * unencapsulated messages ...
5831 */
5832 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5833 val[0] = 1;
5834 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5835
Vipul Pandya636f9d32012-09-26 02:39:39 +00005836 /*
Kumar Sanghvi1ac0f092014-02-18 17:56:12 +05305837 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5838 * capability. Earlier versions of the firmware didn't have the
5839 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5840 * permission to use ULPTX MEMWRITE DSGL.
5841 */
5842 if (is_t4(adap->params.chip)) {
5843 adap->params.ulptx_memwrite_dsgl = false;
5844 } else {
5845 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5846 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5847 1, params, val);
5848 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5849 }
5850
5851 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00005852 * Get device capabilities so we can determine what resources we need
5853 * to manage.
5854 */
5855 memset(&caps_cmd, 0, sizeof(caps_cmd));
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05305856 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5857 FW_CMD_REQUEST_F | FW_CMD_READ_F);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05305858 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
Vipul Pandya636f9d32012-09-26 02:39:39 +00005859 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5860 &caps_cmd);
5861 if (ret < 0)
5862 goto bye;
5863
Vipul Pandya13ee15d2012-09-26 02:39:40 +00005864 if (caps_cmd.ofldcaps) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005865 /* query offload-related parameters */
5866 params[0] = FW_PARAM_DEV(NTID);
5867 params[1] = FW_PARAM_PFVF(SERVER_START);
5868 params[2] = FW_PARAM_PFVF(SERVER_END);
5869 params[3] = FW_PARAM_PFVF(TDDP_START);
5870 params[4] = FW_PARAM_PFVF(TDDP_END);
5871 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005872 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5873 params, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005874 if (ret < 0)
5875 goto bye;
5876 adap->tids.ntids = val[0];
5877 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5878 adap->tids.stid_base = val[1];
5879 adap->tids.nstids = val[2] - val[1] + 1;
Vipul Pandya636f9d32012-09-26 02:39:39 +00005880 /*
 5881 * Setup server filter region. Divide the available filter
 5882 * region into two parts. Regular filters get 1/3rd and server
 5883 * filters get 2/3rd part. This is only enabled if the workaround
 5884 * path is enabled.
 5885 * 1. For regular filters.
 5886 * 2. Server filters: these are special filters which are used
 5887 * to redirect SYN packets to the offload queue.
5888 */
5889 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5890 adap->tids.sftid_base = adap->tids.ftid_base +
5891 DIV_ROUND_UP(adap->tids.nftids, 3);
5892 adap->tids.nsftids = adap->tids.nftids -
5893 DIV_ROUND_UP(adap->tids.nftids, 3);
5894 adap->tids.nftids = adap->tids.sftid_base -
5895 adap->tids.ftid_base;
5896 }
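		/* e.g. 496 filter IDs starting at 0 become regular
		 * filters 0..165 (DIV_ROUND_UP(496, 3) == 166) and
		 * server filters 166..495 (330 IDs).
		 */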
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005897 adap->vres.ddp.start = val[3];
5898 adap->vres.ddp.size = val[4] - val[3] + 1;
5899 adap->params.ofldq_wr_cred = val[5];
Vipul Pandya636f9d32012-09-26 02:39:39 +00005900
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00005901 adap->params.offload = 1;
5902 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005903 if (caps_cmd.rdmacaps) {
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005904 params[0] = FW_PARAM_PFVF(STAG_START);
5905 params[1] = FW_PARAM_PFVF(STAG_END);
5906 params[2] = FW_PARAM_PFVF(RQ_START);
5907 params[3] = FW_PARAM_PFVF(RQ_END);
5908 params[4] = FW_PARAM_PFVF(PBL_START);
5909 params[5] = FW_PARAM_PFVF(PBL_END);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005910 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5911 params, val);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005912 if (ret < 0)
5913 goto bye;
5914 adap->vres.stag.start = val[0];
5915 adap->vres.stag.size = val[1] - val[0] + 1;
5916 adap->vres.rq.start = val[2];
5917 adap->vres.rq.size = val[3] - val[2] + 1;
5918 adap->vres.pbl.start = val[4];
5919 adap->vres.pbl.size = val[5] - val[4] + 1;
5920
5921 params[0] = FW_PARAM_PFVF(SQRQ_START);
5922 params[1] = FW_PARAM_PFVF(SQRQ_END);
5923 params[2] = FW_PARAM_PFVF(CQ_START);
5924 params[3] = FW_PARAM_PFVF(CQ_END);
5925 params[4] = FW_PARAM_PFVF(OCQ_START);
5926 params[5] = FW_PARAM_PFVF(OCQ_END);
Hariprasad Shenai5c937dd2014-09-01 19:55:00 +05305927 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
5928 val);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005929 if (ret < 0)
5930 goto bye;
5931 adap->vres.qp.start = val[0];
5932 adap->vres.qp.size = val[1] - val[0] + 1;
5933 adap->vres.cq.start = val[2];
5934 adap->vres.cq.size = val[3] - val[2] + 1;
5935 adap->vres.ocq.start = val[4];
5936 adap->vres.ocq.size = val[5] - val[4] + 1;
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05305937
5938 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5939 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
Hariprasad Shenai5c937dd2014-09-01 19:55:00 +05305940 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
5941 val);
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05305942 if (ret < 0) {
5943 adap->params.max_ordird_qp = 8;
5944 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5945 ret = 0;
5946 } else {
5947 adap->params.max_ordird_qp = val[0];
5948 adap->params.max_ird_adapter = val[1];
5949 }
5950 dev_info(adap->pdev_dev,
5951 "max_ordird_qp %d max_ird_adapter %d\n",
5952 adap->params.max_ordird_qp,
5953 adap->params.max_ird_adapter);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005954 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00005955 if (caps_cmd.iscsicaps) {
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005956 params[0] = FW_PARAM_PFVF(ISCSI_START);
5957 params[1] = FW_PARAM_PFVF(ISCSI_END);
Vipul Pandya636f9d32012-09-26 02:39:39 +00005958 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5959 params, val);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005960 if (ret < 0)
5961 goto bye;
5962 adap->vres.iscsi.start = val[0];
5963 adap->vres.iscsi.size = val[1] - val[0] + 1;
5964 }
5965#undef FW_PARAM_PFVF
5966#undef FW_PARAM_DEV
5967
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05305968 /* The MTU/MSS Table is initialized by now, so load its values. If
5969 * we're initializing the adapter, then we'll make any modifications
5970 * we want to the MTU/MSS Table and also initialize the congestion
5971 * parameters.
Vipul Pandya636f9d32012-09-26 02:39:39 +00005972 */
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005973 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05305974 if (state != DEV_STATE_INIT) {
5975 int i;
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00005976
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05305977 /* The default MTU Table contains values 1492 and 1500.
5978 * However, for TCP, it's better to have two values which are
5979 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5980 * This allows us to have a TCP Data Payload which is a
5981 * multiple of 8 regardless of what combination of TCP Options
5982 * are in use (always a multiple of 4 bytes) which is
5983 * important for performance reasons. For instance, if no
5984 * options are in use, then we have a 20-byte IP header and a
5985 * 20-byte TCP header. In this case, a 1500-byte MSS would
5986 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5987 * which is not a multiple of 8. So using an MSS of 1488 in
5988 * this case results in a TCP Data Payload of 1448 bytes which
5989 * is a multiple of 8. On the other hand, if 12-byte TCP Time
5990 * Stamps have been negotiated, then an MTU of 1500 bytes
5991 * results in a TCP Data Payload of 1448 bytes which, as
5992 * above, is a multiple of 8 bytes ...
5993 */
5994 for (i = 0; i < NMTUS; i++)
5995 if (adap->params.mtus[i] == 1492) {
5996 adap->params.mtus[i] = 1488;
5997 break;
5998 }
5999
6000 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6001 adap->params.b_wnd);
6002 }
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05306003 t4_init_tp_params(adap);
Vipul Pandya636f9d32012-09-26 02:39:39 +00006004 adap->flags |= FW_OK;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006005 return 0;
6006
6007 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00006008 * Something bad happened. If a command timed out or failed with EIO,
 6009 * the FW is not operating within its spec or something catastrophic
 6010 * happened to the HW/FW; stop issuing commands.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006011 */
Vipul Pandya636f9d32012-09-26 02:39:39 +00006012bye:
6013 if (ret != -ETIMEDOUT && ret != -EIO)
6014 t4_fw_bye(adap, adap->mbox);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00006015 return ret;
6016}
6017
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00006018/* EEH callbacks */
6019
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if (adap->flags & DEV_ENABLED) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

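/*
 * eeh_slot_reset - called after the PCI slot has been reset
 *
 * Re-enable the device, restore its PCI state, renegotiate with the
 * firmware and re-allocate the ports' virtual interfaces so that traffic
 * can be restarted.
 */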
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev,
				"Cannot reenable PCI device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

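/*
 * eeh_resume - called when traffic may start flowing again
 *
 * Restart the interfaces that were running before the error and re-attach
 * all the net devices.
 */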
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};

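/* Return true if the Link Configuration supports "high speed" operation,
 * i.e. 10Gb/s or 40Gb/s.
 */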
static inline bool is_x_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}

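/* Fill in the basic parameters of a Response Queue: the owning adapter,
 * interrupt holdoff timer and packet-count threshold, entry size and
 * queue capacity.
 */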
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
			     unsigned int us, unsigned int cnt,
			     unsigned int size, unsigned int iqe_size)
{
	q->adap = adap;
	set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif
	int ciq_size;

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to one queue set per non-10G port and up to as many
	 * queue sets as there are CPU cores per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
		s->rdmaciqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		CH_WARN(adap, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
		struct sge_ofld_rxq *r = &s->rdmaciq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
	}

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

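/*
 * Request MSI-X vectors for the queue sets we'd like to use.  We ask for
 * "want" vectors but accept as few as "need"; if fewer than "want" are
 * granted, shrink the Ethernet and offload queue sets to fit what
 * pci_enable_msix_range() actually gave us.
 */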
static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 3 * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
	want = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (want < 0)
		return want;

	/*
	 * Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = want - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		i = want - EXTRA_VECS - s->max_ethqsets;
		i -= ofld_need - nchan;
		s->ofldqsets = (i / nchan) * nchan;  /* round down */
	}
	for (i = 0; i < want; ++i)
		adap->msix_info[i].vec = entries[i].vector;

	return 0;
}

#undef EXTRA_VECS

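/* Allocate each port's RSS indirection table and fill it with the default
 * round-robin spread across that port's Rx queue sets.
 */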
static int init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}

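/* Print a port's link-speed capabilities plus the adapter's PCIe and VPD
 * identification at probe time.
 */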
static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, P/N: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.pn);
}

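/* Set the Enable Relaxed Ordering bit in the device's PCIe Device Control
 * register so its upstream writes may be reordered for performance.
 */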
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

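/*
 * init_one - probe one PCI function
 *
 * Map BAR0 and check that this function is the PF we drive; set up DMA,
 * negotiate with the firmware, configure the queues, allocate a net
 * device per port and register as many of them as possible.  Functions
 * we don't manage fall through to SR-IOV instantiation only.
 */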
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	void __iomem *regs;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
	if (func != ent->driver_data) {
		iounmap(regs);
		pci_disable_device(pdev);
		pci_save_state(pdev);        /* to restore SR-IOV later */
		goto sriov;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev,
				"unable to obtain 64-bit DMA for coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128 bytes.  Write coalescing is enabled
		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value
		 * for the queue is less than the number of segments that can
		 * be accommodated in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		netdev->ethtool_ops = &cxgb_ethtool_ops;
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

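/*
 * remove_one - tear down one adapter
 *
 * Undo init_one: disable SR-IOV, detach the ULDs, unregister the net
 * devices, free filter state and other resources, and release all PCI
 * mappings and regions.
 */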
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if (adapter->flags & DEV_ENABLED) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		synchronize_rcu();
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};

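/* Module initialization: create the optional debugfs root, register the
 * PCI driver and, when IPv6 is enabled, register an inet6addr notifier on
 * behalf of the offload ULDs.
 */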
static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
#endif

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);