/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#define DRV_VERSION "1.3.0-ko"
#define DRV_DESC "Chelsio T4 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
	/*
	 * Physical Function provisioning constants.
	 */
	PFRES_NVI = 4,			/* # of Virtual Interfaces */
	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr */
	PFRES_NEQ = 256,		/* # of egress queues */
	PFRES_NIQ = 0,			/* # of ingress queues */
	PFRES_TC = 0,			/* PCI-E traffic class */
	PFRES_NEXACTF = 128,		/* # of exact MPS filters */

	PFRES_R_CAPS = FW_CMD_CAP_PF,
	PFRES_WX_CAPS = FW_CMD_CAP_PF,

#ifdef CONFIG_PCI_IOV
	/*
	 * Virtual Function provisioning constants.  We need two extra Ingress
	 * Queues with Interrupt capability to serve as the VF's Firmware
	 * Event Queue and Forwarded Interrupt Queue (when using MSI mode);
	 * neither will have Free Lists associated with them.  For each
	 * Ethernet/Control Egress Queue and for each Free List, we need an
	 * Egress Context.
	 */
	VFRES_NPORTS = 1,		/* # of "ports" per VF */
	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */

	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
	VFRES_TC = 0,			/* PCI-E traffic class */
	VFRES_NEXACTF = 16,		/* # of exact MPS filters */

	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
#endif
};

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
{
	unsigned int portn, portvec;

	/*
	 * Give PFs access to all of the ports.
	 */
	if (vf == 0)
		return FW_PFVF_CMD_PMASK_MASK;

	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF.  We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet, so we have to compute this.
	 */
	if (adapter->params.nports == 0)
		return 0;

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;
	for (;;) {
		/*
		 * Isolate the lowest set bit in the port vector.  If we're at
		 * the port number that we want, return that as the pmask;
		 * otherwise mask that bit out of the port vector and
		 * decrement our port number ...
		 */
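		/*
		 * Worked example: portvec = 0b0110 gives
		 * portvec & (portvec - 1) = 0b0100, so the XOR below isolates
		 * pmask = 0b0010; the next iteration then sees 0b0100.
		 */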
		unsigned int pmask = portvec ^ (portvec & (portvec-1));
		if (portn == 0)
			return pmask;
		portn--;
		portvec &= ~pmask;
	}
	/*NOTREACHED*/
}

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
	CH_DEVICE(0xa000, 0),  /* PE10K */
	CH_DEVICE(0x4001, -1),
	CH_DEVICE(0x4002, -1),
	CH_DEVICE(0x4003, -1),
	CH_DEVICE(0x4004, -1),
	CH_DEVICE(0x4005, -1),
	CH_DEVICE(0x4006, -1),
	CH_DEVICE(0x4007, -1),
	CH_DEVICE(0x4008, -1),
	CH_DEVICE(0x4009, -1),
	CH_DEVICE(0x400a, -1),
	CH_DEVICE(0x4401, 4),
	CH_DEVICE(0x4402, 4),
	CH_DEVICE(0x4403, 4),
	CH_DEVICE(0x4404, 4),
	CH_DEVICE(0x4405, 4),
	CH_DEVICE(0x4406, 4),
	CH_DEVICE(0x4407, 4),
	CH_DEVICE(0x4408, 4),
	CH_DEVICE(0x4409, 4),
	CH_DEVICE(0x440a, 4),
	CH_DEVICE(0x440d, 4),
	CH_DEVICE(0x440e, 4),
	{ 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance-sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* T5 supports more PFs with SR-IOV than T4, so size the num_vf array with
 * the larger NUM_OF_PF_WITH_SRIOV_T5 macro.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV_T5];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf,
		 "number of VFs for each of PFs 0-3 for T4 and PFs 0-7 for T5");
#endif

/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

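	/*
	 * The addr[]/filt_idx[] arrays batch up to 7 addresses per
	 * t4_alloc_mac_filt() call; addresses that can't be given exact-match
	 * filters are folded into the uhash/mhash hash filters instead.
	 */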
	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

static struct workqueue_struct *workq;

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
	return ret;
}

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter has loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = GET_TCB_COOKIE(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;				/* skip RSS header */
	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	return 0;
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	if (v & PFSW) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);
}

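/*
 * MSI-X vector layout (matching name_msix_vecs() above): vector 0 is the
 * non-queue (slow path) interrupt, vector 1 serves the firmware event
 * queue, and vectors 2 and up are assigned to the Ethernet, offload, and
 * RDMA Rx queues, in that order.
 */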
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}

/**
 *	write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
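		/*
		 * A negative msi_idx below tells the queue allocation code
		 * to forward interrupts through the interrupt queue just
		 * allocated, encoding its absolute ID as -(abs_id + 1).
		 */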
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
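	/* each port gets "j" consecutive offload queues: queue i -> port i/j */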
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}

/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	u32 vers, exp_major;
	const struct fw_hdr *hdr;
	const struct firmware *fw;
	struct device *dev = adap->pdev_dev;
	char *fw_file_name;

	switch (CHELSIO_CHIP_VERSION(adap->chip)) {
	case CHELSIO_T4:
		fw_file_name = FW_FNAME;
		exp_major = FW_VERSION_MAJOR;
		break;
	case CHELSIO_T5:
		fw_file_name = FW5_FNAME;
		exp_major = FW_VERSION_MAJOR_T5;
		break;
	default:
		dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
		return -EINVAL;
	}

	ret = request_firmware(&fw, fw_file_name, dev);
	if (ret < 0) {
		dev_err(dev, "unable to load firmware image %s, error %d\n",
			fw_file_name, ret);
		return ret;
	}

	hdr = (const struct fw_hdr *)fw->data;
	vers = ntohl(hdr->fw_ver);
	if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
		ret = -EINVAL;          /* wrong major version, won't do */
		goto out;
	}

	/*
	 * If the flash FW is unusable or we found something newer, load it.
	 */
	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
	    vers > adap->params.fw_vers) {
		dev_info(dev, "upgrading firmware ...\n");
		ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
				    /*force=*/false);
		if (!ret)
			dev_info(dev,
				 "firmware upgraded to version %pI4 from %s\n",
				 &hdr->fw_ver, fw_file_name);
		else
			dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
	} else {
		/*
		 * Tell our caller that we didn't upgrade the firmware.
		 */
		ret = -EINVAL;
	}

out:	release_firmware(fw);
	return ret;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL)
			return -EAGAIN;
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(V_FW_FILTER_WR_TID(ftid) |
		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		      V_FW_FILTER_WR_NOREPLY(0) |
		      V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(V_FW_FILTER_WR_RX_CHAN(0) |
		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxBroadcastFrames  ",
	"TxMulticastFrames  ",
	"TxUnicastFrames    ",
	"TxErrorFrames      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"TxFramesDropped    ",
	"TxPauseFrames      ",
	"TxPPP0Frames       ",
	"TxPPP1Frames       ",
	"TxPPP2Frames       ",
	"TxPPP3Frames       ",
	"TxPPP4Frames       ",
	"TxPPP5Frames       ",
	"TxPPP6Frames       ",
	"TxPPP7Frames       ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxBroadcastFrames  ",
	"RxMulticastFrames  ",
	"RxUnicastFrames    ",

	"RxFramesTooLong    ",
	"RxJabberErrors     ",
	"RxFCSErrors        ",
	"RxLengthErrors     ",
	"RxSymbolErrors     ",
	"RxRuntFrames       ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"RxPauseFrames      ",
	"RxPPP0Frames       ",
	"RxPPP1Frames       ",
	"RxPPP2Frames       ",
	"RxPPP3Frames       ",
	"RxPPP4Frames       ",
	"RxPPP5Frames       ",
	"RxPPP6Frames       ",
	"RxPPP7Frames       ",

	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc   ",
	"RxBG1FramesTrunc   ",
	"RxBG2FramesTrunc   ",
	"RxBG3FramesTrunc   ",

	"TSO                ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"GROpackets         ",
	"GROmerged          ",
	"WriteCoalSuccess   ",
	"WriteCoalFail      ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	if (is_t4(adap->chip))
		return T4_REGMAP_SIZE;
	else
		return T5_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			"%u.%u.%u.%u, TP %u.%u.%u.%u",
			FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 val1, val2;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
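	/*
	 * T5 adds SGE write-coalescing statistics: derive the success/fail
	 * counts for the two remaining slots from the SGE total/match
	 * statistic registers.  T4 has no such counters, so report zeros.
	 */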
	if (!is_t4(adapter->chip)) {
		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
		*data = val1 - val2;
		data++;
		*data = val2;
		data++;
	} else {
		memset(data, 0, 2 * sizeof(u64));
		data += 2;
	}
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->chip) |
		(CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
}

static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int t4_reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
1635 0x23608, 0x23628,
1636 0x23630, 0x2363c,
1637 0x23700, 0x2371c,
1638 0x23780, 0x2378c,
1639 0x23800, 0x23c38,
1640 0x23c80, 0x23d7c,
1641 0x23e00, 0x23e04,
1642 0x24000, 0x2402c,
1643 0x24100, 0x2413c,
1644 0x24190, 0x241c8,
1645 0x24200, 0x24318,
1646 0x24400, 0x24528,
1647 0x24540, 0x24614,
1648 0x25000, 0x25040,
1649 0x2504c, 0x25060,
1650 0x250c0, 0x250ec,
1651 0x25200, 0x25268,
1652 0x25270, 0x25284,
1653 0x252fc, 0x25388,
1654 0x25400, 0x25404,
1655 0x25500, 0x25518,
1656 0x2552c, 0x2553c,
1657 0x25550, 0x25554,
1658 0x25600, 0x25600,
1659 0x25608, 0x25628,
1660 0x25630, 0x2563c,
1661 0x25700, 0x2571c,
1662 0x25780, 0x2578c,
1663 0x25800, 0x25c38,
1664 0x25c80, 0x25d7c,
1665 0x25e00, 0x25e04,
1666 0x26000, 0x2602c,
1667 0x26100, 0x2613c,
1668 0x26190, 0x261c8,
1669 0x26200, 0x26318,
1670 0x26400, 0x26528,
1671 0x26540, 0x26614,
1672 0x27000, 0x27040,
1673 0x2704c, 0x27060,
1674 0x270c0, 0x270ec,
1675 0x27200, 0x27268,
1676 0x27270, 0x27284,
1677 0x272fc, 0x27388,
1678 0x27400, 0x27404,
1679 0x27500, 0x27518,
1680 0x2752c, 0x2753c,
1681 0x27550, 0x27554,
1682 0x27600, 0x27600,
1683 0x27608, 0x27628,
1684 0x27630, 0x2763c,
1685 0x27700, 0x2771c,
1686 0x27780, 0x2778c,
1687 0x27800, 0x27c38,
1688 0x27c80, 0x27d7c,
1689 0x27e00, 0x27e04
1690 };
1691
Santosh Rastapur251f9e82013-03-14 05:08:50 +00001692 static const unsigned int t5_reg_ranges[] = {
1693 0x1008, 0x1148,
1694 0x1180, 0x11b4,
1695 0x11fc, 0x123c,
1696 0x1280, 0x173c,
1697 0x1800, 0x18fc,
1698 0x3000, 0x3028,
1699 0x3060, 0x30d8,
1700 0x30e0, 0x30fc,
1701 0x3140, 0x357c,
1702 0x35a8, 0x35cc,
1703 0x35ec, 0x35ec,
1704 0x3600, 0x5624,
1705 0x56cc, 0x575c,
1706 0x580c, 0x5814,
1707 0x5890, 0x58bc,
1708 0x5940, 0x59dc,
1709 0x59fc, 0x5a18,
1710 0x5a60, 0x5a9c,
1711 0x5b9c, 0x5bfc,
1712 0x6000, 0x6040,
1713 0x6058, 0x614c,
1714 0x7700, 0x7798,
1715 0x77c0, 0x78fc,
1716 0x7b00, 0x7c54,
1717 0x7d00, 0x7efc,
1718 0x8dc0, 0x8de0,
1719 0x8df8, 0x8e84,
1720 0x8ea0, 0x8f84,
1721 0x8fc0, 0x90f8,
1722 0x9400, 0x9470,
1723 0x9600, 0x96f4,
1724 0x9800, 0x9808,
1725 0x9820, 0x983c,
1726 0x9850, 0x9864,
1727 0x9c00, 0x9c6c,
1728 0x9c80, 0x9cec,
1729 0x9d00, 0x9d6c,
1730 0x9d80, 0x9dec,
1731 0x9e00, 0x9e6c,
1732 0x9e80, 0x9eec,
1733 0x9f00, 0x9f6c,
1734 0x9f80, 0xa020,
1735 0xd004, 0xd03c,
1736 0xdfc0, 0xdfe0,
1737 0xe000, 0x11088,
1738 0x1109c, 0x1117c,
1739 0x11190, 0x11204,
1740 0x19040, 0x1906c,
1741 0x19078, 0x19080,
1742 0x1908c, 0x19124,
1743 0x19150, 0x191b0,
1744 0x191d0, 0x191e8,
1745 0x19238, 0x19290,
1746 0x193f8, 0x19474,
1747 0x19490, 0x194cc,
1748 0x194f0, 0x194f8,
1749 0x19c00, 0x19c60,
1750 0x19c94, 0x19e10,
1751 0x19e50, 0x19f34,
1752 0x19f40, 0x19f50,
1753 0x19f90, 0x19fe4,
1754 0x1a000, 0x1a06c,
1755 0x1a0b0, 0x1a120,
1756 0x1a128, 0x1a138,
1757 0x1a190, 0x1a1c4,
1758 0x1a1fc, 0x1a1fc,
1759 0x1e008, 0x1e00c,
1760 0x1e040, 0x1e04c,
1761 0x1e284, 0x1e290,
1762 0x1e2c0, 0x1e2c0,
1763 0x1e2e0, 0x1e2e0,
1764 0x1e300, 0x1e384,
1765 0x1e3c0, 0x1e3c8,
1766 0x1e408, 0x1e40c,
1767 0x1e440, 0x1e44c,
1768 0x1e684, 0x1e690,
1769 0x1e6c0, 0x1e6c0,
1770 0x1e6e0, 0x1e6e0,
1771 0x1e700, 0x1e784,
1772 0x1e7c0, 0x1e7c8,
1773 0x1e808, 0x1e80c,
1774 0x1e840, 0x1e84c,
1775 0x1ea84, 0x1ea90,
1776 0x1eac0, 0x1eac0,
1777 0x1eae0, 0x1eae0,
1778 0x1eb00, 0x1eb84,
1779 0x1ebc0, 0x1ebc8,
1780 0x1ec08, 0x1ec0c,
1781 0x1ec40, 0x1ec4c,
1782 0x1ee84, 0x1ee90,
1783 0x1eec0, 0x1eec0,
1784 0x1eee0, 0x1eee0,
1785 0x1ef00, 0x1ef84,
1786 0x1efc0, 0x1efc8,
1787 0x1f008, 0x1f00c,
1788 0x1f040, 0x1f04c,
1789 0x1f284, 0x1f290,
1790 0x1f2c0, 0x1f2c0,
1791 0x1f2e0, 0x1f2e0,
1792 0x1f300, 0x1f384,
1793 0x1f3c0, 0x1f3c8,
1794 0x1f408, 0x1f40c,
1795 0x1f440, 0x1f44c,
1796 0x1f684, 0x1f690,
1797 0x1f6c0, 0x1f6c0,
1798 0x1f6e0, 0x1f6e0,
1799 0x1f700, 0x1f784,
1800 0x1f7c0, 0x1f7c8,
1801 0x1f808, 0x1f80c,
1802 0x1f840, 0x1f84c,
1803 0x1fa84, 0x1fa90,
1804 0x1fac0, 0x1fac0,
1805 0x1fae0, 0x1fae0,
1806 0x1fb00, 0x1fb84,
1807 0x1fbc0, 0x1fbc8,
1808 0x1fc08, 0x1fc0c,
1809 0x1fc40, 0x1fc4c,
1810 0x1fe84, 0x1fe90,
1811 0x1fec0, 0x1fec0,
1812 0x1fee0, 0x1fee0,
1813 0x1ff00, 0x1ff84,
1814 0x1ffc0, 0x1ffc8,
1815 0x30000, 0x30030,
1816 0x30100, 0x30144,
1817 0x30190, 0x301d0,
1818 0x30200, 0x30318,
1819 0x30400, 0x3052c,
1820 0x30540, 0x3061c,
1821 0x30800, 0x30834,
1822 0x308c0, 0x30908,
1823 0x30910, 0x309ac,
1824 0x30a00, 0x30a04,
1825 0x30a0c, 0x30a2c,
1826 0x30a44, 0x30a50,
1827 0x30a74, 0x30c24,
1828 0x30d08, 0x30d14,
1829 0x30d1c, 0x30d20,
1830 0x30d3c, 0x30d50,
1831 0x31200, 0x3120c,
1832 0x31220, 0x31220,
1833 0x31240, 0x31240,
1834 0x31600, 0x31600,
1835 0x31608, 0x3160c,
1836 0x31a00, 0x31a1c,
1837 0x31e04, 0x31e20,
1838 0x31e38, 0x31e3c,
1839 0x31e80, 0x31e80,
1840 0x31e88, 0x31ea8,
1841 0x31eb0, 0x31eb4,
1842 0x31ec8, 0x31ed4,
1843 0x31fb8, 0x32004,
1844 0x32208, 0x3223c,
1845 0x32600, 0x32630,
1846 0x32a00, 0x32abc,
1847 0x32b00, 0x32b70,
1848 0x33000, 0x33048,
1849 0x33060, 0x3309c,
1850 0x330f0, 0x33148,
1851 0x33160, 0x3319c,
1852 0x331f0, 0x332e4,
1853 0x332f8, 0x333e4,
1854 0x333f8, 0x33448,
1855 0x33460, 0x3349c,
1856 0x334f0, 0x33548,
1857 0x33560, 0x3359c,
1858 0x335f0, 0x336e4,
1859 0x336f8, 0x337e4,
1860 0x337f8, 0x337fc,
1861 0x33814, 0x33814,
1862 0x3382c, 0x3382c,
1863 0x33880, 0x3388c,
1864 0x338e8, 0x338ec,
1865 0x33900, 0x33948,
1866 0x33960, 0x3399c,
1867 0x339f0, 0x33ae4,
1868 0x33af8, 0x33b10,
1869 0x33b28, 0x33b28,
1870 0x33b3c, 0x33b50,
1871 0x33bf0, 0x33c10,
1872 0x33c28, 0x33c28,
1873 0x33c3c, 0x33c50,
1874 0x33cf0, 0x33cfc,
1875 0x34000, 0x34030,
1876 0x34100, 0x34144,
1877 0x34190, 0x341d0,
1878 0x34200, 0x34318,
1879 0x34400, 0x3452c,
1880 0x34540, 0x3461c,
1881 0x34800, 0x34834,
1882 0x348c0, 0x34908,
1883 0x34910, 0x349ac,
1884 0x34a00, 0x34a04,
1885 0x34a0c, 0x34a2c,
1886 0x34a44, 0x34a50,
1887 0x34a74, 0x34c24,
1888 0x34d08, 0x34d14,
1889 0x34d1c, 0x34d20,
1890 0x34d3c, 0x34d50,
1891 0x35200, 0x3520c,
1892 0x35220, 0x35220,
1893 0x35240, 0x35240,
1894 0x35600, 0x35600,
1895 0x35608, 0x3560c,
1896 0x35a00, 0x35a1c,
1897 0x35e04, 0x35e20,
1898 0x35e38, 0x35e3c,
1899 0x35e80, 0x35e80,
1900 0x35e88, 0x35ea8,
1901 0x35eb0, 0x35eb4,
1902 0x35ec8, 0x35ed4,
1903 0x35fb8, 0x36004,
1904 0x36208, 0x3623c,
1905 0x36600, 0x36630,
1906 0x36a00, 0x36abc,
1907 0x36b00, 0x36b70,
1908 0x37000, 0x37048,
1909 0x37060, 0x3709c,
1910 0x370f0, 0x37148,
1911 0x37160, 0x3719c,
1912 0x371f0, 0x372e4,
1913 0x372f8, 0x373e4,
1914 0x373f8, 0x37448,
1915 0x37460, 0x3749c,
1916 0x374f0, 0x37548,
1917 0x37560, 0x3759c,
1918 0x375f0, 0x376e4,
1919 0x376f8, 0x377e4,
1920 0x377f8, 0x377fc,
1921 0x37814, 0x37814,
1922 0x3782c, 0x3782c,
1923 0x37880, 0x3788c,
1924 0x378e8, 0x378ec,
1925 0x37900, 0x37948,
1926 0x37960, 0x3799c,
1927 0x379f0, 0x37ae4,
1928 0x37af8, 0x37b10,
1929 0x37b28, 0x37b28,
1930 0x37b3c, 0x37b50,
1931 0x37bf0, 0x37c10,
1932 0x37c28, 0x37c28,
1933 0x37c3c, 0x37c50,
1934 0x37cf0, 0x37cfc,
1935 0x38000, 0x38030,
1936 0x38100, 0x38144,
1937 0x38190, 0x381d0,
1938 0x38200, 0x38318,
1939 0x38400, 0x3852c,
1940 0x38540, 0x3861c,
1941 0x38800, 0x38834,
1942 0x388c0, 0x38908,
1943 0x38910, 0x389ac,
1944 0x38a00, 0x38a04,
1945 0x38a0c, 0x38a2c,
1946 0x38a44, 0x38a50,
1947 0x38a74, 0x38c24,
1948 0x38d08, 0x38d14,
1949 0x38d1c, 0x38d20,
1950 0x38d3c, 0x38d50,
1951 0x39200, 0x3920c,
1952 0x39220, 0x39220,
1953 0x39240, 0x39240,
1954 0x39600, 0x39600,
1955 0x39608, 0x3960c,
1956 0x39a00, 0x39a1c,
1957 0x39e04, 0x39e20,
1958 0x39e38, 0x39e3c,
1959 0x39e80, 0x39e80,
1960 0x39e88, 0x39ea8,
1961 0x39eb0, 0x39eb4,
1962 0x39ec8, 0x39ed4,
1963 0x39fb8, 0x3a004,
1964 0x3a208, 0x3a23c,
1965 0x3a600, 0x3a630,
1966 0x3aa00, 0x3aabc,
1967 0x3ab00, 0x3ab70,
1968 0x3b000, 0x3b048,
1969 0x3b060, 0x3b09c,
1970 0x3b0f0, 0x3b148,
1971 0x3b160, 0x3b19c,
1972 0x3b1f0, 0x3b2e4,
1973 0x3b2f8, 0x3b3e4,
1974 0x3b3f8, 0x3b448,
1975 0x3b460, 0x3b49c,
1976 0x3b4f0, 0x3b548,
1977 0x3b560, 0x3b59c,
1978 0x3b5f0, 0x3b6e4,
1979 0x3b6f8, 0x3b7e4,
1980 0x3b7f8, 0x3b7fc,
1981 0x3b814, 0x3b814,
1982 0x3b82c, 0x3b82c,
1983 0x3b880, 0x3b88c,
1984 0x3b8e8, 0x3b8ec,
1985 0x3b900, 0x3b948,
1986 0x3b960, 0x3b99c,
1987 0x3b9f0, 0x3bae4,
1988 0x3baf8, 0x3bb10,
1989 0x3bb28, 0x3bb28,
1990 0x3bb3c, 0x3bb50,
1991 0x3bbf0, 0x3bc10,
1992 0x3bc28, 0x3bc28,
1993 0x3bc3c, 0x3bc50,
1994 0x3bcf0, 0x3bcfc,
1995 0x3c000, 0x3c030,
1996 0x3c100, 0x3c144,
1997 0x3c190, 0x3c1d0,
1998 0x3c200, 0x3c318,
1999 0x3c400, 0x3c52c,
2000 0x3c540, 0x3c61c,
2001 0x3c800, 0x3c834,
2002 0x3c8c0, 0x3c908,
2003 0x3c910, 0x3c9ac,
2004 0x3ca00, 0x3ca04,
2005 0x3ca0c, 0x3ca2c,
2006 0x3ca44, 0x3ca50,
2007 0x3ca74, 0x3cc24,
2008 0x3cd08, 0x3cd14,
2009 0x3cd1c, 0x3cd20,
2010 0x3cd3c, 0x3cd50,
2011 0x3d200, 0x3d20c,
2012 0x3d220, 0x3d220,
2013 0x3d240, 0x3d240,
2014 0x3d600, 0x3d600,
2015 0x3d608, 0x3d60c,
2016 0x3da00, 0x3da1c,
2017 0x3de04, 0x3de20,
2018 0x3de38, 0x3de3c,
2019 0x3de80, 0x3de80,
2020 0x3de88, 0x3dea8,
2021 0x3deb0, 0x3deb4,
2022 0x3dec8, 0x3ded4,
2023 0x3dfb8, 0x3e004,
2024 0x3e208, 0x3e23c,
2025 0x3e600, 0x3e630,
2026 0x3ea00, 0x3eabc,
2027 0x3eb00, 0x3eb70,
2028 0x3f000, 0x3f048,
2029 0x3f060, 0x3f09c,
2030 0x3f0f0, 0x3f148,
2031 0x3f160, 0x3f19c,
2032 0x3f1f0, 0x3f2e4,
2033 0x3f2f8, 0x3f3e4,
2034 0x3f3f8, 0x3f448,
2035 0x3f460, 0x3f49c,
2036 0x3f4f0, 0x3f548,
2037 0x3f560, 0x3f59c,
2038 0x3f5f0, 0x3f6e4,
2039 0x3f6f8, 0x3f7e4,
2040 0x3f7f8, 0x3f7fc,
2041 0x3f814, 0x3f814,
2042 0x3f82c, 0x3f82c,
2043 0x3f880, 0x3f88c,
2044 0x3f8e8, 0x3f8ec,
2045 0x3f900, 0x3f948,
2046 0x3f960, 0x3f99c,
2047 0x3f9f0, 0x3fae4,
2048 0x3faf8, 0x3fb10,
2049 0x3fb28, 0x3fb28,
2050 0x3fb3c, 0x3fb50,
2051 0x3fbf0, 0x3fc10,
2052 0x3fc28, 0x3fc28,
2053 0x3fc3c, 0x3fc50,
2054 0x3fcf0, 0x3fcfc,
2055 0x40000, 0x4000c,
2056 0x40040, 0x40068,
2057 0x40080, 0x40144,
2058 0x40180, 0x4018c,
2059 0x40200, 0x40298,
2060 0x402ac, 0x4033c,
2061 0x403f8, 0x403fc,
2062 0x41300, 0x413c4,
2063 0x41400, 0x4141c,
2064 0x41480, 0x414d0,
2065 0x44000, 0x44078,
2066 0x440c0, 0x44278,
2067 0x442c0, 0x44478,
2068 0x444c0, 0x44678,
2069 0x446c0, 0x44878,
2070 0x448c0, 0x449fc,
2071 0x45000, 0x45068,
2072 0x45080, 0x45084,
2073 0x450a0, 0x450b0,
2074 0x45200, 0x45268,
2075 0x45280, 0x45284,
2076 0x452a0, 0x452b0,
2077 0x460c0, 0x460e4,
2078 0x47000, 0x4708c,
2079 0x47200, 0x47250,
2080 0x47400, 0x47420,
2081 0x47600, 0x47618,
2082 0x47800, 0x47814,
2083 0x48000, 0x4800c,
2084 0x48040, 0x48068,
2085 0x48080, 0x48144,
2086 0x48180, 0x4818c,
2087 0x48200, 0x48298,
2088 0x482ac, 0x4833c,
2089 0x483f8, 0x483fc,
2090 0x49300, 0x493c4,
2091 0x49400, 0x4941c,
2092 0x49480, 0x494d0,
2093 0x4c000, 0x4c078,
2094 0x4c0c0, 0x4c278,
2095 0x4c2c0, 0x4c478,
2096 0x4c4c0, 0x4c678,
2097 0x4c6c0, 0x4c878,
2098 0x4c8c0, 0x4c9fc,
2099 0x4d000, 0x4d068,
2100 0x4d080, 0x4d084,
2101 0x4d0a0, 0x4d0b0,
2102 0x4d200, 0x4d268,
2103 0x4d280, 0x4d284,
2104 0x4d2a0, 0x4d2b0,
2105 0x4e0c0, 0x4e0e4,
2106 0x4f000, 0x4f08c,
2107 0x4f200, 0x4f250,
2108 0x4f400, 0x4f420,
2109 0x4f600, 0x4f618,
2110 0x4f800, 0x4f814,
2111 0x50000, 0x500cc,
2112 0x50400, 0x50400,
2113 0x50800, 0x508cc,
2114 0x50c00, 0x50c00,
2115 0x51000, 0x5101c,
2116 0x51300, 0x51308,
2117 };
2118
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002119 int i;
2120 struct adapter *ap = netdev2adap(dev);
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002121 const unsigned int *reg_ranges;
2122 int arr_size = 0, buf_size = 0;
2123
2124 if (is_t4(ap->chip)) {
2125 reg_ranges = &t4_reg_ranges[0];
2126 arr_size = ARRAY_SIZE(t4_reg_ranges);
2127 buf_size = T4_REGMAP_SIZE;
2128 } else {
2129 reg_ranges = &t5_reg_ranges[0];
2130 arr_size = ARRAY_SIZE(t5_reg_ranges);
2131 buf_size = T5_REGMAP_SIZE;
2132 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002133
2134 regs->version = mk_adap_vers(ap);
2135
Santosh Rastapur251f9e82013-03-14 05:08:50 +00002136 memset(buf, 0, buf_size);
2137 for (i = 0; i < arr_size; i += 2)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002138 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2139}
2140
2141static int restart_autoneg(struct net_device *dev)
2142{
2143 struct port_info *p = netdev_priv(dev);
2144
2145 if (!netif_running(dev))
2146 return -EAGAIN;
2147 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2148 return -EINVAL;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002149 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002150 return 0;
2151}
2152
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002153static int identify_port(struct net_device *dev,
2154 enum ethtool_phys_id_state state)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002155{
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002156 unsigned int val;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002157 struct adapter *adap = netdev2adap(dev);
2158
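	/* 0xffff is presumably the "blink until explicitly stopped"
	 * encoding and 0 stops the blinking; both values are passed
	 * straight through to the firmware.
	 */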
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002159 if (state == ETHTOOL_ID_ACTIVE)
2160 val = 0xffff;
2161 else if (state == ETHTOOL_ID_INACTIVE)
2162 val = 0;
2163 else
2164 return -EINVAL;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002165
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002166 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002167}
2168
2169static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2170{
2171 unsigned int v = 0;
2172
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002173 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2174 type == FW_PORT_TYPE_BT_XAUI) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002175 v |= SUPPORTED_TP;
2176 if (caps & FW_PORT_CAP_SPEED_100M)
2177 v |= SUPPORTED_100baseT_Full;
2178 if (caps & FW_PORT_CAP_SPEED_1G)
2179 v |= SUPPORTED_1000baseT_Full;
2180 if (caps & FW_PORT_CAP_SPEED_10G)
2181 v |= SUPPORTED_10000baseT_Full;
2182 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2183 v |= SUPPORTED_Backplane;
2184 if (caps & FW_PORT_CAP_SPEED_1G)
2185 v |= SUPPORTED_1000baseKX_Full;
2186 if (caps & FW_PORT_CAP_SPEED_10G)
2187 v |= SUPPORTED_10000baseKX4_Full;
2188 } else if (type == FW_PORT_TYPE_KR)
2189 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002190 else if (type == FW_PORT_TYPE_BP_AP)
Dimitris Michailidis7d5e77a2010-12-14 21:36:47 +00002191 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2192 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2193 else if (type == FW_PORT_TYPE_BP4_AP)
2194 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2195 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2196 SUPPORTED_10000baseKX4_Full;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002197 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2198 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002199 v |= SUPPORTED_FIBRE;
2200
2201 if (caps & FW_PORT_CAP_ANEG)
2202 v |= SUPPORTED_Autoneg;
2203 return v;
2204}
2205
2206static unsigned int to_fw_linkcaps(unsigned int caps)
2207{
2208 unsigned int v = 0;
2209
2210 if (caps & ADVERTISED_100baseT_Full)
2211 v |= FW_PORT_CAP_SPEED_100M;
2212 if (caps & ADVERTISED_1000baseT_Full)
2213 v |= FW_PORT_CAP_SPEED_1G;
2214 if (caps & ADVERTISED_10000baseT_Full)
2215 v |= FW_PORT_CAP_SPEED_10G;
2216 return v;
2217}
2218
2219static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2220{
2221 const struct port_info *p = netdev_priv(dev);
2222
2223 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002224 p->port_type == FW_PORT_TYPE_BT_XFI ||
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002225 p->port_type == FW_PORT_TYPE_BT_XAUI)
2226 cmd->port = PORT_TP;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002227 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2228 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002229 cmd->port = PORT_FIBRE;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002230 else if (p->port_type == FW_PORT_TYPE_SFP) {
2231 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2232 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2233 cmd->port = PORT_DA;
2234 else
2235 cmd->port = PORT_FIBRE;
2236 } else
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002237 cmd->port = PORT_OTHER;
2238
2239 if (p->mdio_addr >= 0) {
2240 cmd->phy_address = p->mdio_addr;
2241 cmd->transceiver = XCVR_EXTERNAL;
2242 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2243 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2244 } else {
2245 cmd->phy_address = 0; /* not really, but no better option */
2246 cmd->transceiver = XCVR_INTERNAL;
2247 cmd->mdio_support = 0;
2248 }
2249
2250 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2251 cmd->advertising = from_fw_linkcaps(p->port_type,
2252 p->link_cfg.advertising);
David Decotigny70739492011-04-27 18:32:40 +00002253 ethtool_cmd_speed_set(cmd,
2254 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002255 cmd->duplex = DUPLEX_FULL;
2256 cmd->autoneg = p->link_cfg.autoneg;
2257 cmd->maxtxpkt = 0;
2258 cmd->maxrxpkt = 0;
2259 return 0;
2260}
2261
2262static unsigned int speed_to_caps(int speed)
2263{
2264 if (speed == SPEED_100)
2265 return FW_PORT_CAP_SPEED_100M;
2266 if (speed == SPEED_1000)
2267 return FW_PORT_CAP_SPEED_1G;
2268 if (speed == SPEED_10000)
2269 return FW_PORT_CAP_SPEED_10G;
2270 return 0;
2271}
2272
2273static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2274{
2275 unsigned int cap;
2276 struct port_info *p = netdev_priv(dev);
2277 struct link_config *lc = &p->link_cfg;
David Decotigny25db0332011-04-27 18:32:39 +00002278 u32 speed = ethtool_cmd_speed(cmd);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002279
2280 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2281 return -EINVAL;
2282
2283 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2284 /*
2285 * PHY offers a single speed. See if that's what's
2286 * being requested.
2287 */
2288 if (cmd->autoneg == AUTONEG_DISABLE &&
David Decotigny25db0332011-04-27 18:32:39 +00002289 (lc->supported & speed_to_caps(speed)))
2290 return 0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002291 return -EINVAL;
2292 }
2293
2294 if (cmd->autoneg == AUTONEG_DISABLE) {
David Decotigny25db0332011-04-27 18:32:39 +00002295 cap = speed_to_caps(speed);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002296
David Decotigny25db0332011-04-27 18:32:39 +00002297 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
2298 (speed == SPEED_10000))
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002299 return -EINVAL;
2300 lc->requested_speed = cap;
2301 lc->advertising = 0;
2302 } else {
2303 cap = to_fw_linkcaps(cmd->advertising);
2304 if (!(lc->supported & cap))
2305 return -EINVAL;
2306 lc->requested_speed = 0;
2307 lc->advertising = cap | FW_PORT_CAP_ANEG;
2308 }
2309 lc->autoneg = cmd->autoneg;
2310
2311 if (netif_running(dev))
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002312 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2313 lc);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002314 return 0;
2315}
2316
2317static void get_pauseparam(struct net_device *dev,
2318 struct ethtool_pauseparam *epause)
2319{
2320 struct port_info *p = netdev_priv(dev);
2321
2322 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2323 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2324 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2325}
2326
2327static int set_pauseparam(struct net_device *dev,
2328 struct ethtool_pauseparam *epause)
2329{
2330 struct port_info *p = netdev_priv(dev);
2331 struct link_config *lc = &p->link_cfg;
2332
2333 if (epause->autoneg == AUTONEG_DISABLE)
2334 lc->requested_fc = 0;
2335 else if (lc->supported & FW_PORT_CAP_ANEG)
2336 lc->requested_fc = PAUSE_AUTONEG;
2337 else
2338 return -EINVAL;
2339
2340 if (epause->rx_pause)
2341 lc->requested_fc |= PAUSE_RX;
2342 if (epause->tx_pause)
2343 lc->requested_fc |= PAUSE_TX;
2344 if (netif_running(dev))
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002345 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2346 lc);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002347 return 0;
2348}
2349
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002350static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2351{
2352 const struct port_info *pi = netdev_priv(dev);
2353 const struct sge *s = &pi->adapter->sge;
2354
2355 e->rx_max_pending = MAX_RX_BUFFERS;
2356 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2357 e->rx_jumbo_max_pending = 0;
2358 e->tx_max_pending = MAX_TXQ_ENTRIES;
2359
2360 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2361 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2362 e->rx_jumbo_pending = 0;
2363 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2364}
2365
2366static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2367{
2368 int i;
2369 const struct port_info *pi = netdev_priv(dev);
2370 struct adapter *adapter = pi->adapter;
2371 struct sge *s = &adapter->sge;
2372
2373 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2374 e->tx_pending > MAX_TXQ_ENTRIES ||
2375 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2376 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2377 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2378 return -EINVAL;
2379
2380 if (adapter->flags & FULL_INIT_DONE)
2381 return -EBUSY;
2382
2383 for (i = 0; i < pi->nqsets; ++i) {
2384 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2385 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2386 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2387 }
2388 return 0;
2389}
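/*
 * The "+ 8" above (and the matching "- 8" in get_sge_param()) keeps
 * the user-visible ring size net of the eight free-list descriptors
 * that appear to back the 64-byte status page the SGE writes at the
 * end of each ring.
 */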
2390
2391static int closest_timer(const struct sge *s, int time)
2392{
2393 int i, delta, match = 0, min_delta = INT_MAX;
2394
2395 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2396 delta = time - s->timer_val[i];
2397 if (delta < 0)
2398 delta = -delta;
2399 if (delta < min_delta) {
2400 min_delta = delta;
2401 match = i;
2402 }
2403 }
2404 return match;
2405}
2406
2407static int closest_thres(const struct sge *s, int thres)
2408{
2409 int i, delta, match = 0, min_delta = INT_MAX;
2410
2411 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2412 delta = thres - s->counter_val[i];
2413 if (delta < 0)
2414 delta = -delta;
2415 if (delta < min_delta) {
2416 min_delta = delta;
2417 match = i;
2418 }
2419 }
2420 return match;
2421}
2422
2423/*
2424 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2425 */
2426static unsigned int qtimer_val(const struct adapter *adap,
2427 const struct sge_rspq *q)
2428{
2429 unsigned int idx = q->intr_params >> 1;
2430
2431 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2432}
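/*
 * A sketch of the intr_params encoding assumed above and in
 * set_rxq_intr_params() below: bit 0 holds QINTR_CNT_EN and the
 * remaining bits hold the timer index, e.g.
 *
 *	q->intr_params = QINTR_TIMER_IDX(3) | QINTR_CNT_EN;
 *
 * selects sge.timer_val[3] with the packet-count threshold enabled.
 */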
2433
2434/**
2435 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
2436 * @adap: the adapter
2437 * @q: the Rx queue
2438 * @us: the hold-off time in us, or 0 to disable timer
2439 * @cnt: the hold-off packet count, or 0 to disable counter
2440 *
2441 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2442 * one of the two needs to be enabled for the queue to generate interrupts.
2443 */
2444static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
2445 unsigned int us, unsigned int cnt)
2446{
2447 if ((us | cnt) == 0)
2448 cnt = 1;
2449
2450 if (cnt) {
2451 int err;
2452 u32 v, new_idx;
2453
2454 new_idx = closest_thres(&adap->sge, cnt);
2455 if (q->desc && q->pktcnt_idx != new_idx) {
2456 /* the queue has already been created, update it */
2457 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2458 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2459 FW_PARAMS_PARAM_YZ(q->cntxt_id);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00002460 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2461 &new_idx);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002462 if (err)
2463 return err;
2464 }
2465 q->pktcnt_idx = new_idx;
2466 }
2467
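	/* us == 0 requests no timer: index 6 is assumed to be the
	 * special no-timer encoding, which qtimer_val() above reads
	 * back as a hold-off time of 0.
	 */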
2468 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2469 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2470 return 0;
2471}
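/*
 * Usage sketch (the values are illustrative only): ask for roughly a
 * 50 us hold-off with an 8-packet threshold; both are rounded to the
 * closest values the hardware supports:
 *
 *	err = set_rxq_intr_params(adap, &adap->sge.ethrxq[0].rspq, 50, 8);
 */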
2472
2473static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2474{
2475 const struct port_info *pi = netdev_priv(dev);
2476 struct adapter *adap = pi->adapter;
Thadeu Lima de Souza Cascardod4fc9dc2013-01-15 05:15:10 +00002477 struct sge_rspq *q;
2478 int i;
2479 int r = 0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002480
Thadeu Lima de Souza Cascardod4fc9dc2013-01-15 05:15:10 +00002481 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
2482 q = &adap->sge.ethrxq[i].rspq;
2483 r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2484 c->rx_max_coalesced_frames);
2485 if (r) {
2486 dev_err(&dev->dev, "failed to set coalesce %d\n", r);
2487 break;
2488 }
2489 }
2490 return r;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002491}
2492
2493static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2494{
2495 const struct port_info *pi = netdev_priv(dev);
2496 const struct adapter *adap = pi->adapter;
2497 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2498
2499 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2500 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2501 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2502 return 0;
2503}
2504
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002505/**
2506 * eeprom_ptov - translate a physical EEPROM address to virtual
2507 * @phys_addr: the physical EEPROM address
2508 * @fn: the PCI function number
2509 * @sz: size of function-specific area
2510 *
2511 * Translate a physical EEPROM address to virtual. The first 1K is
2512 * accessed through virtual addresses starting at 31K, the rest is
2513 * accessed through virtual addresses starting at 0.
2514 *
2515 * The mapping is as follows:
2516 * [0..1K) -> [31K..32K)
2517 * [1K..1K+A) -> [31K-A..31K)
2518 * [1K+A..ES) -> [0..ES-A-1K)
2519 *
2520 * where A = @fn * @sz, and ES = EEPROM size.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002521 */
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002522static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002523{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002524 fn *= sz;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002525 if (phys_addr < 1024)
2526 return phys_addr + (31 << 10);
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002527 if (phys_addr < 1024 + fn)
2528 return 31744 - fn + phys_addr - 1024;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002529 if (phys_addr < EEPROMSIZE)
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002530 return phys_addr - 1024 - fn;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002531 return -EINVAL;
2532}
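/*
 * Worked example, assuming fn = 1 and sz = 1024 (so A = 1024):
 *
 *	eeprom_ptov(0, 1, 1024)    -> 31744	(0 + 31K)
 *	eeprom_ptov(1024, 1, 1024) -> 30720	(31K - A)
 *	eeprom_ptov(2048, 1, 1024) -> 0		(2048 - 1K - A)
 */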
2533
2534/*
2535 * The next two routines implement EEPROM read/write from physical addresses.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002536 */
2537static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2538{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002539 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002540
2541 if (vaddr >= 0)
2542 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2543 return vaddr < 0 ? vaddr : 0;
2544}
2545
2546static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2547{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002548 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002549
2550 if (vaddr >= 0)
2551 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2552 return vaddr < 0 ? vaddr : 0;
2553}
2554
2555#define EEPROM_MAGIC 0x38E2F10C
2556
2557static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2558 u8 *data)
2559{
2560 int i, err = 0;
2561 struct adapter *adapter = netdev2adap(dev);
2562
2563 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2564 if (!buf)
2565 return -ENOMEM;
2566
2567 e->magic = EEPROM_MAGIC;
2568 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2569 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2570
2571 if (!err)
2572 memcpy(data, buf + e->offset, e->len);
2573 kfree(buf);
2574 return err;
2575}
2576
2577static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2578 u8 *data)
2579{
2580 u8 *buf;
2581 int err = 0;
2582 u32 aligned_offset, aligned_len, *p;
2583 struct adapter *adapter = netdev2adap(dev);
2584
2585 if (eeprom->magic != EEPROM_MAGIC)
2586 return -EINVAL;
2587
2588 aligned_offset = eeprom->offset & ~3;
2589 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2590
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002591 if (adapter->fn > 0) {
2592 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2593
2594 if (aligned_offset < start ||
2595 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2596 return -EPERM;
2597 }
2598
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002599 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2600 /*
2601 * RMW possibly needed for first or last words.
2602 */
2603 buf = kmalloc(aligned_len, GFP_KERNEL);
2604 if (!buf)
2605 return -ENOMEM;
2606 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2607 if (!err && aligned_len > 4)
2608 err = eeprom_rd_phys(adapter,
2609 aligned_offset + aligned_len - 4,
2610 (u32 *)&buf[aligned_len - 4]);
2611 if (err)
2612 goto out;
2613 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2614 } else
2615 buf = data;
2616
2617 err = t4_seeprom_wp(adapter, false);
2618 if (err)
2619 goto out;
2620
2621 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2622 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2623 aligned_offset += 4;
2624 }
2625
2626 if (!err)
2627 err = t4_seeprom_wp(adapter, true);
2628out:
2629 if (buf != data)
2630 kfree(buf);
2631 return err;
2632}
2633
2634static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2635{
2636 int ret;
2637 const struct firmware *fw;
2638 struct adapter *adap = netdev2adap(netdev);
2639
2640 ef->data[sizeof(ef->data) - 1] = '\0';
2641 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2642 if (ret < 0)
2643 return ret;
2644
2645 ret = t4_load_fw(adap, fw->data, fw->size);
2646 release_firmware(fw);
2647 if (!ret)
2648 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2649 return ret;
2650}
2651
2652#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2653#define BCAST_CRC 0xa0ccc1a6
2654
2655static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2656{
2657 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2658 wol->wolopts = netdev2adap(dev)->wol;
2659 memset(&wol->sopass, 0, sizeof(wol->sopass));
2660}
2661
2662static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2663{
2664 int err = 0;
2665 struct port_info *pi = netdev_priv(dev);
2666
2667 if (wol->wolopts & ~WOL_SUPPORTED)
2668 return -EINVAL;
2669 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2670 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2671 if (wol->wolopts & WAKE_BCAST) {
2672 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2673 ~0ULL, 0, false);
2674 if (!err)
2675 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2676 ~6ULL, ~0ULL, BCAST_CRC, true);
2677 } else
2678 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2679 return err;
2680}
2681
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002682static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002683{
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00002684 const struct port_info *pi = netdev_priv(dev);
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002685 netdev_features_t changed = dev->features ^ features;
Dimitris Michailidis19ecae22010-10-21 11:29:56 +00002686 int err;
Dimitris Michailidis19ecae22010-10-21 11:29:56 +00002687
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00002688 if (!(changed & NETIF_F_HW_VLAN_RX))
2689 return 0;
Dimitris Michailidis19ecae22010-10-21 11:29:56 +00002690
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00002691 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2692 -1, -1, -1,
2693 !!(features & NETIF_F_HW_VLAN_RX), true);
2694 if (unlikely(err))
2695 dev->features = features ^ NETIF_F_HW_VLAN_RX;
Dimitris Michailidis19ecae22010-10-21 11:29:56 +00002696 return err;
Dimitris Michailidis87b6cf52010-04-27 16:22:42 -07002697}
2698
Ben Hutchings7850f632011-12-15 13:55:01 +00002699static u32 get_rss_table_size(struct net_device *dev)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002700{
2701 const struct port_info *pi = netdev_priv(dev);
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002702
Ben Hutchings7850f632011-12-15 13:55:01 +00002703 return pi->rss_size;
2704}
2705
2706static int get_rss_table(struct net_device *dev, u32 *p)
2707{
2708 const struct port_info *pi = netdev_priv(dev);
2709 unsigned int n = pi->rss_size;
2710
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002711 while (n--)
Ben Hutchings7850f632011-12-15 13:55:01 +00002712 p[n] = pi->rss[n];
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002713 return 0;
2714}
2715
Ben Hutchings7850f632011-12-15 13:55:01 +00002716static int set_rss_table(struct net_device *dev, const u32 *p)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002717{
2718 unsigned int i;
2719 struct port_info *pi = netdev_priv(dev);
2720
Ben Hutchings7850f632011-12-15 13:55:01 +00002721 for (i = 0; i < pi->rss_size; i++)
2722 pi->rss[i] = p[i];
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002723 if (pi->adapter->flags & FULL_INIT_DONE)
2724 return write_rss(pi, pi->rss);
2725 return 0;
2726}
2727
2728static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
Ben Hutchings815c7db2011-09-06 13:49:12 +00002729 u32 *rules)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002730{
Dimitris Michailidisf7965642010-07-11 12:01:18 +00002731 const struct port_info *pi = netdev_priv(dev);
2732
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002733 switch (info->cmd) {
Dimitris Michailidisf7965642010-07-11 12:01:18 +00002734 case ETHTOOL_GRXFH: {
2735 unsigned int v = pi->rss_mode;
2736
2737 info->data = 0;
2738 switch (info->flow_type) {
2739 case TCP_V4_FLOW:
2740 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2741 info->data = RXH_IP_SRC | RXH_IP_DST |
2742 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2743 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2744 info->data = RXH_IP_SRC | RXH_IP_DST;
2745 break;
2746 case UDP_V4_FLOW:
2747 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2748 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2749 info->data = RXH_IP_SRC | RXH_IP_DST |
2750 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2751 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2752 info->data = RXH_IP_SRC | RXH_IP_DST;
2753 break;
2754 case SCTP_V4_FLOW:
2755 case AH_ESP_V4_FLOW:
2756 case IPV4_FLOW:
2757 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2758 info->data = RXH_IP_SRC | RXH_IP_DST;
2759 break;
2760 case TCP_V6_FLOW:
2761 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2762 info->data = RXH_IP_SRC | RXH_IP_DST |
2763 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2764 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2765 info->data = RXH_IP_SRC | RXH_IP_DST;
2766 break;
2767 case UDP_V6_FLOW:
2768 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2769 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2770 info->data = RXH_IP_SRC | RXH_IP_DST |
2771 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2772 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2773 info->data = RXH_IP_SRC | RXH_IP_DST;
2774 break;
2775 case SCTP_V6_FLOW:
2776 case AH_ESP_V6_FLOW:
2777 case IPV6_FLOW:
2778 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2779 info->data = RXH_IP_SRC | RXH_IP_DST;
2780 break;
2781 }
2782 return 0;
2783 }
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002784 case ETHTOOL_GRXRINGS:
Dimitris Michailidisf7965642010-07-11 12:01:18 +00002785 info->data = pi->nqsets;
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002786 return 0;
2787 }
2788 return -EOPNOTSUPP;
2789}
2790
stephen hemminger9b07be42012-01-04 12:59:49 +00002791static const struct ethtool_ops cxgb_ethtool_ops = {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002792 .get_settings = get_settings,
2793 .set_settings = set_settings,
2794 .get_drvinfo = get_drvinfo,
2795 .get_msglevel = get_msglevel,
2796 .set_msglevel = set_msglevel,
2797 .get_ringparam = get_sge_param,
2798 .set_ringparam = set_sge_param,
2799 .get_coalesce = get_coalesce,
2800 .set_coalesce = set_coalesce,
2801 .get_eeprom_len = get_eeprom_len,
2802 .get_eeprom = get_eeprom,
2803 .set_eeprom = set_eeprom,
2804 .get_pauseparam = get_pauseparam,
2805 .set_pauseparam = set_pauseparam,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002806 .get_link = ethtool_op_get_link,
2807 .get_strings = get_strings,
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002808 .set_phys_id = identify_port,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002809 .nway_reset = restart_autoneg,
2810 .get_sset_count = get_sset_count,
2811 .get_ethtool_stats = get_stats,
2812 .get_regs_len = get_regs_len,
2813 .get_regs = get_regs,
2814 .get_wol = get_wol,
2815 .set_wol = set_wol,
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002816 .get_rxnfc = get_rxnfc,
Ben Hutchings7850f632011-12-15 13:55:01 +00002817 .get_rxfh_indir_size = get_rss_table_size,
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002818 .get_rxfh_indir = get_rss_table,
2819 .set_rxfh_indir = set_rss_table,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002820 .flash_device = set_flash,
2821};
2822
2823/*
2824 * debugfs support
2825 */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002826static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2827 loff_t *ppos)
2828{
2829 loff_t pos = *ppos;
Al Viro496ad9a2013-01-23 17:07:38 -05002830 loff_t avail = file_inode(file)->i_size;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002831 unsigned int mem = (uintptr_t)file->private_data & 3;
2832 struct adapter *adap = file->private_data - mem;
2833
2834 if (pos < 0)
2835 return -EINVAL;
2836 if (pos >= avail)
2837 return 0;
2838 if (count > avail - pos)
2839 count = avail - pos;
2840
2841 while (count) {
2842 size_t len;
2843 int ret, ofst;
2844 __be32 data[16];
2845
2846 if (mem == MEM_MC)
2847 ret = t4_mc_read(adap, pos, data, NULL);
2848 else
2849 ret = t4_edc_read(adap, mem, pos, data, NULL);
2850 if (ret)
2851 return ret;
2852
2853 ofst = pos % sizeof(data);
2854 len = min(count, sizeof(data) - ofst);
2855 if (copy_to_user(buf, (u8 *)data + ofst, len))
2856 return -EFAULT;
2857
2858 buf += len;
2859 pos += len;
2860 count -= len;
2861 }
2862 count = pos - *ppos;
2863 *ppos = pos;
2864 return count;
2865}
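/*
 * Worked example (offsets illustrative): with the 64-byte bounce
 * buffer above, a 100-byte read starting at *ppos == 60 issues three
 * block reads and copies 4, then 64, then 32 bytes back to user space.
 */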
2866
2867static const struct file_operations mem_debugfs_fops = {
2868 .owner = THIS_MODULE,
Stephen Boyd234e3402012-04-05 14:25:11 -07002869 .open = simple_open,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002870 .read = mem_read,
Arnd Bergmann6038f372010-08-15 18:52:59 +02002871 .llseek = default_llseek,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002872};
2873
Bill Pemberton91744942012-12-03 09:23:02 -05002874static void add_debugfs_mem(struct adapter *adap, const char *name,
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00002875 unsigned int idx, unsigned int size_mb)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002876{
2877 struct dentry *de;
2878
2879 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2880 (void *)adap + idx, &mem_debugfs_fops);
2881 if (de && de->d_inode)
2882 de->d_inode->i_size = size_mb << 20;
2883}
2884
Bill Pemberton91744942012-12-03 09:23:02 -05002885static int setup_debugfs(struct adapter *adap)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002886{
2887 int i;
2888
2889 if (IS_ERR_OR_NULL(adap->debugfs_root))
2890 return -1;
2891
2892 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2893 if (i & EDRAM0_ENABLE)
2894 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2895 if (i & EDRAM1_ENABLE)
2896 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2897 if (i & EXT_MEM_ENABLE)
2898 add_debugfs_mem(adap, "mc", MEM_MC,
2899 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2900 if (adap->l2t)
2901 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2902 &t4_l2t_fops);
2903 return 0;
2904}
2905
2906/*
2907 * upper-layer driver support
2908 */
2909
2910/*
2911 * Allocate an active-open TID and set it to the supplied value.
2912 */
2913int cxgb4_alloc_atid(struct tid_info *t, void *data)
2914{
2915 int atid = -1;
2916
2917 spin_lock_bh(&t->atid_lock);
2918 if (t->afree) {
2919 union aopen_entry *p = t->afree;
2920
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002921 atid = (p - t->atid_tab) + t->atid_base;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002922 t->afree = p->next;
2923 p->data = data;
2924 t->atids_in_use++;
2925 }
2926 spin_unlock_bh(&t->atid_lock);
2927 return atid;
2928}
2929EXPORT_SYMBOL(cxgb4_alloc_atid);
2930
2931/*
2932 * Release an active-open TID.
2933 */
2934void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2935{
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002936 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002937
2938 spin_lock_bh(&t->atid_lock);
2939 p->next = t->afree;
2940 t->afree = p;
2941 t->atids_in_use--;
2942 spin_unlock_bh(&t->atid_lock);
2943}
2944EXPORT_SYMBOL(cxgb4_free_atid);
2945
2946/*
2947 * Allocate a server TID and set it to the supplied value.
2948 */
2949int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2950{
2951 int stid;
2952
2953 spin_lock_bh(&t->stid_lock);
2954 if (family == PF_INET) {
2955 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2956 if (stid < t->nstids)
2957 __set_bit(stid, t->stid_bmap);
2958 else
2959 stid = -1;
2960 } else {
2961 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2962 if (stid < 0)
2963 stid = -1;
2964 }
2965 if (stid >= 0) {
2966 t->stid_tab[stid].data = data;
2967 stid += t->stid_base;
2968 t->stids_in_use++;
2969 }
2970 spin_unlock_bh(&t->stid_lock);
2971 return stid;
2972}
2973EXPORT_SYMBOL(cxgb4_alloc_stid);
2974
Vipul Pandyadca4fae2012-12-10 09:30:53 +00002975/* Allocate a server filter TID and set it to the supplied value.
2976 */
2977int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
2978{
2979 int stid;
2980
2981 spin_lock_bh(&t->stid_lock);
2982 if (family == PF_INET) {
2983 stid = find_next_zero_bit(t->stid_bmap,
2984 t->nstids + t->nsftids, t->nstids);
2985 if (stid < (t->nstids + t->nsftids))
2986 __set_bit(stid, t->stid_bmap);
2987 else
2988 stid = -1;
2989 } else {
2990 stid = -1;
2991 }
2992 if (stid >= 0) {
2993 t->stid_tab[stid].data = data;
2994 stid += t->stid_base;
2995 t->stids_in_use++;
2996 }
2997 spin_unlock_bh(&t->stid_lock);
2998 return stid;
2999}
3000EXPORT_SYMBOL(cxgb4_alloc_sftid);
3001
3002/* Release a server TID.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003003 */
3004void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3005{
3006 stid -= t->stid_base;
3007 spin_lock_bh(&t->stid_lock);
3008 if (family == PF_INET)
3009 __clear_bit(stid, t->stid_bmap);
3010 else
3011 bitmap_release_region(t->stid_bmap, stid, 2);
3012 t->stid_tab[stid].data = NULL;
3013 t->stids_in_use--;
3014 spin_unlock_bh(&t->stid_lock);
3015}
3016EXPORT_SYMBOL(cxgb4_free_stid);
3017
3018/*
3019 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3020 */
3021static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3022 unsigned int tid)
3023{
3024 struct cpl_tid_release *req;
3025
3026 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3027 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3028 INIT_TP_WR(req, tid);
3029 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3030}
3031
3032/*
3033 * Queue a TID release request and if necessary schedule a work queue to
3034 * process it.
3035 */
stephen hemminger31b9c192010-10-18 05:39:18 +00003036static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3037 unsigned int tid)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003038{
3039 void **p = &t->tid_tab[tid];
3040 struct adapter *adap = container_of(t, struct adapter, tids);
3041
3042 spin_lock_bh(&adap->tid_release_lock);
3043 *p = adap->tid_release_head;
3044 /* Low 2 bits encode the Tx channel number */
3045 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3046 if (!adap->tid_release_task_busy) {
3047 adap->tid_release_task_busy = true;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303048 queue_work(workq, &adap->tid_release_task);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003049 }
3050 spin_unlock_bh(&adap->tid_release_lock);
3051}
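/*
 * A sketch of the pointer tagging used above: tid_tab entries are
 * pointer aligned, so the low two bits of &t->tid_tab[tid] are free to
 * carry the Tx channel; process_tid_release_list() below recovers the
 * channel with "(uintptr_t)p & 3" and the entry address by subtracting
 * it back out.
 */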
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003052
3053/*
3054 * Process the list of pending TID release requests.
3055 */
3056static void process_tid_release_list(struct work_struct *work)
3057{
3058 struct sk_buff *skb;
3059 struct adapter *adap;
3060
3061 adap = container_of(work, struct adapter, tid_release_task);
3062
3063 spin_lock_bh(&adap->tid_release_lock);
3064 while (adap->tid_release_head) {
3065 void **p = adap->tid_release_head;
3066 unsigned int chan = (uintptr_t)p & 3;
3067 p = (void *)p - chan;
3068
3069 adap->tid_release_head = *p;
3070 *p = NULL;
3071 spin_unlock_bh(&adap->tid_release_lock);
3072
3073 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3074 GFP_KERNEL)))
3075 schedule_timeout_uninterruptible(1);
3076
3077 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3078 t4_ofld_send(adap, skb);
3079 spin_lock_bh(&adap->tid_release_lock);
3080 }
3081 adap->tid_release_task_busy = false;
3082 spin_unlock_bh(&adap->tid_release_lock);
3083}
3084
3085/*
3086 * Release a TID and inform HW. If we are unable to allocate the release
3087 * message, we defer it to a work queue.
3088 */
3089void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3090{
3091 void *old;
3092 struct sk_buff *skb;
3093 struct adapter *adap = container_of(t, struct adapter, tids);
3094
3095 old = t->tid_tab[tid];
3096 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3097 if (likely(skb)) {
3098 t->tid_tab[tid] = NULL;
3099 mk_tid_release(skb, chan, tid);
3100 t4_ofld_send(adap, skb);
3101 } else
3102 cxgb4_queue_tid_release(t, chan, tid);
3103 if (old)
3104 atomic_dec(&t->tids_in_use);
3105}
3106EXPORT_SYMBOL(cxgb4_remove_tid);
3107
3108/*
3109 * Allocate and initialize the TID tables. Returns 0 on success.
3110 */
3111static int tid_init(struct tid_info *t)
3112{
3113 size_t size;
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003114 unsigned int stid_bmap_size;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003115 unsigned int natids = t->natids;
3116
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003117 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003118 size = t->ntids * sizeof(*t->tid_tab) +
3119 natids * sizeof(*t->atid_tab) +
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003120 t->nstids * sizeof(*t->stid_tab) +
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003121 t->nsftids * sizeof(*t->stid_tab) +
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003122 stid_bmap_size * sizeof(long) +
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003123 t->nftids * sizeof(*t->ftid_tab) +
3124 t->nsftids * sizeof(*t->ftid_tab);
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003125
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003126 t->tid_tab = t4_alloc_mem(size);
3127 if (!t->tid_tab)
3128 return -ENOMEM;
3129
3130 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3131 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003132 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003133 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003134 spin_lock_init(&t->stid_lock);
3135 spin_lock_init(&t->atid_lock);
3136
3137 t->stids_in_use = 0;
3138 t->afree = NULL;
3139 t->atids_in_use = 0;
3140 atomic_set(&t->tids_in_use, 0);
3141
3142 /* Set up the free list for atid_tab and clear the stid bitmap. */
3143 if (natids) {
3144 while (--natids)
3145 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3146 t->afree = t->atid_tab;
3147 }
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003148 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003149 return 0;
3150}
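/*
 * The single allocation above is carved up in order: tid_tab[ntids],
 * atid_tab[natids], stid_tab[nstids + nsftids], the stid bitmap and
 * finally ftid_tab[nftids + nsftids], so freeing tid_tab releases all
 * of the tables at once.
 */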
3151
3152/**
3153 * cxgb4_create_server - create an IP server
3154 * @dev: the device
3155 * @stid: the server TID
3156 * @sip: local IP address to bind server to
3157 * @sport: the server's TCP port
3158 * @queue: queue to direct messages from this server to
3159 *
3160 * Create an IP server for the given port and address.
3161 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3162 */
3163int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
Vipul Pandya793dad92012-12-10 09:30:56 +00003164 __be32 sip, __be16 sport, __be16 vlan,
3165 unsigned int queue)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003166{
3167 unsigned int chan;
3168 struct sk_buff *skb;
3169 struct adapter *adap;
3170 struct cpl_pass_open_req *req;
3171
3172 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3173 if (!skb)
3174 return -ENOMEM;
3175
3176 adap = netdev2adap(dev);
3177 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3178 INIT_TP_WR(req, 0);
3179 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3180 req->local_port = sport;
3181 req->peer_port = htons(0);
3182 req->local_ip = sip;
3183 req->peer_ip = htonl(0);
Dimitris Michailidise46dab42010-08-23 17:20:58 +00003184 chan = rxq_to_chan(&adap->sge, queue);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003185 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3186 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3187 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3188 return t4_mgmt_tx(adap, skb);
3189}
3190EXPORT_SYMBOL(cxgb4_create_server);
3191
3192/**
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003193 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3194 * @mtus: the HW MTU table
3195 * @mtu: the target MTU
3196 * @idx: index of selected entry in the MTU table
3197 *
3198 * Returns the index and the value in the HW MTU table that is closest to
3199 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3200 * table, in which case that smallest available value is selected.
3201 */
3202unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3203 unsigned int *idx)
3204{
3205 unsigned int i = 0;
3206
3207 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3208 ++i;
3209 if (idx)
3210 *idx = i;
3211 return mtus[i];
3212}
3213EXPORT_SYMBOL(cxgb4_best_mtu);
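/*
 * Usage sketch (the caller context and field name are assumed):
 *
 *	unsigned int idx;
 *	unsigned int mtu = cxgb4_best_mtu(adap->params.mtus, 1500, &idx);
 *
 * mtu is then the largest table entry not exceeding 1500, unless every
 * entry is larger, in which case the smallest entry is returned.
 */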
3214
3215/**
3216 * cxgb4_port_chan - get the HW channel of a port
3217 * @dev: the net device for the port
3218 *
3219 * Return the HW Tx channel of the given port.
3220 */
3221unsigned int cxgb4_port_chan(const struct net_device *dev)
3222{
3223 return netdev2pinfo(dev)->tx_chan;
3224}
3225EXPORT_SYMBOL(cxgb4_port_chan);
3226
Vipul Pandya881806b2012-05-18 15:29:24 +05303227unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3228{
3229 struct adapter *adap = netdev2adap(dev);
3230 u32 v;
3231
3232 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3233 return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
3234}
3235EXPORT_SYMBOL(cxgb4_dbfifo_count);
3236
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003237/**
3238 * cxgb4_port_viid - get the VI id of a port
3239 * @dev: the net device for the port
3240 *
3241 * Return the VI id of the given port.
3242 */
3243unsigned int cxgb4_port_viid(const struct net_device *dev)
3244{
3245 return netdev2pinfo(dev)->viid;
3246}
3247EXPORT_SYMBOL(cxgb4_port_viid);
3248
3249/**
3250 * cxgb4_port_idx - get the index of a port
3251 * @dev: the net device for the port
3252 *
3253 * Return the index of the given port.
3254 */
3255unsigned int cxgb4_port_idx(const struct net_device *dev)
3256{
3257 return netdev2pinfo(dev)->port_id;
3258}
3259EXPORT_SYMBOL(cxgb4_port_idx);
3260
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003261void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3262 struct tp_tcp_stats *v6)
3263{
3264 struct adapter *adap = pci_get_drvdata(pdev);
3265
3266 spin_lock(&adap->stats_lock);
3267 t4_tp_get_tcp_stats(adap, v4, v6);
3268 spin_unlock(&adap->stats_lock);
3269}
3270EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3271
3272void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3273 const unsigned int *pgsz_order)
3274{
3275 struct adapter *adap = netdev2adap(dev);
3276
3277 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3278 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3279 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3280 HPZ3(pgsz_order[3]));
3281}
3282EXPORT_SYMBOL(cxgb4_iscsi_init);
3283
int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);
	int ret;

	ret = t4_fwaddrspace_write(adap, adap->mbox,
				   0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
	return ret;
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

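/*
 * Read an egress queue's current producer and consumer indices directly
 * from its hardware context via a memory window.
 */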
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
	__be64 indices;
	int ret;

	ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}

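/**
 *	cxgb4_sync_txq_pidx - bring a TX queue's HW producer index up to date
 *	@dev: the net device for the port
 *	@qid: the egress queue id
 *	@pidx: the producer index the caller believes is current
 *	@size: the size of the queue, in entries
 *
 *	If the hardware's notion of the producer index lags @pidx (e.g. after
 *	dropped doorbells), ring the doorbell with the delta so the hardware
 *	catches up.  Exported for ULD doorbell drop recovery.
 */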
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
			     QID(qid) | PIDX(delta));
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

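/*
 * Sleep in @usecs increments until both the low- and high-priority
 * doorbell FIFOs have drained.
 */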
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v;

	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
		v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
		if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
			break;
	} while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	q->db_disabled = 1;
	spin_unlock_irq(&q->db_lock);
}

static void enable_txq_db(struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		enable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(&adap->sge.ctrlq[i].q);
}

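/*
 * Re-synchronize a single TX queue with the hardware after doorbells have
 * been dropped: read back the HW producer index and, if it lags the
 * driver's, ring the doorbell with the difference, then re-enable
 * doorbells for the queue.
 */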
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_bh(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
			     QID(q->cntxt_id) | PIDX(delta));
	}
out:
	q->db_disabled = 0;
	spin_unlock_bh(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	mutex_lock(&uld_mutex);
	if (adap->uld_handle[CXGB4_ULD_RDMA])
		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
				cmd);
	mutex_unlock(&uld_mutex);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	drain_db_fifo(adap, dbfifo_drain_delay);
	t4_set_reg_field(adap, SGE_INT_ENABLE3,
			 DBFIFO_HP_INT | DBFIFO_LP_INT,
			 DBFIFO_HP_INT | DBFIFO_LP_INT);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
}

static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
	disable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
	drain_db_fifo(adap, 1);
	recover_all_queues(adap);
	enable_dbs(adap);
}

void t4_db_full(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_INT_ENABLE3,
			 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
	queue_work(workq, &adap->db_full_task);
}

void t4_db_dropped(struct adapter *adap)
{
	queue_work(workq, &adap->db_drop_task);
}

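/*
 * Build the lower-layer driver information block for an Upper Layer Driver
 * and hand it to the ULD's add() method.  On success the ULD's handle is
 * recorded and, if the adapter is already up, the ULD is told so.
 */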
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;
	unsigned short i;

	lli.pdev = adap->pdev;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.nrxq = adap->sge.rdmaqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.ofld_rxq;
		lli.nrxq = adap->sge.ofldqsets;
	}
	lli.ntxq = adap->sge.ofldqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.rev;
	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.filt_mode = adap->filter_mode;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lli.tx_modq[i] = i;
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
	lli.fw_vers = adap->params.fw_vers;
	lli.dbfifo_int_thresh = dbfifo_int_thresh;
	lli.sge_pktshift = adap->sge.pktshift;
	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}

static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}

static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}

/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
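
/*
 * Example (sketch, not part of this driver): a ULD module would typically
 * register a method table at load time, roughly:
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.name = "my_uld",
 *		.add = my_uld_add,
 *		.rx_handler = my_uld_rx_handler,
 *		.state_change = my_uld_state_change,
 *	};
 *
 *	ret = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 *
 * The my_uld_* names are hypothetical; see cxgb4_uld.h for the actual
 * cxgb4_uld_info layout and the semantics of each method.
 */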

/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}

static void cxgb_down(struct adapter *adapter)
{
	t4_intr_disable(adapter);
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	if (adapter->flags & USING_MSIX) {
		free_msix_queue_irqs(adapter);
		free_irq(adapter->msix_info[0].vec, adapter);
	} else
		free_irq(adapter->pdev->irq, adapter);
	quiesce_rx(adapter);
	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}

/* Return an error number if the indicated filter isn't writable ...
 */
static int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}

/* Delete the filter at the specified index (if valid).  This checks for
 * all the common problems with doing this, such as the filter being
 * locked, currently pending in another operation, etc.
 */
static int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
		return -EINVAL;

	f = &adapter->tids.ftid_tab[fidx];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}

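/**
 *	cxgb4_create_server_filter - install a server filter for a listener
 *	@dev: the net device for the port
 *	@stid: the server TID, translated to a filter index below
 *	@sip: local IP address to match (0 for a wildcard)
 *	@sport: local port to match
 *	@vlan: VLAN to match (unused in this implementation)
 *	@queue: the ingress queue matching SYN packets are steered to
 *	@port: physical port to match (when the filter mode includes it)
 *	@mask: match mask for @port
 *
 *	Builds and sends a filter work request that steers matching SYN
 *	packets to @queue.  The filter is marked locked so that normal
 *	filter management cannot touch it.
 */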
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
		__be32 sip, __be16 sport, __be16 vlan,
		unsigned int queue, unsigned char port, unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.nstids;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->filter_mode & F_PORT) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);

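/**
 *	cxgb4_remove_server_filter - remove an installed server filter
 *	@dev: the net device for the port
 *	@stid: the server TID the filter was created with
 *	@queue: the ingress queue the filter steered to (unused here)
 *	@ipv6: whether the listener was IPv6 (unused here)
 *
 *	Unlocks the filter installed by cxgb4_create_server_filter() and
 *	issues a delete work request for it.
 */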
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
		unsigned int queue, bool ipv6)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.nstids;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	ret = delete_filter(adap, stid);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);

static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	spin_lock(&adapter->stats_lock);
	t4_get_port_stats(adapter, p->tx_chan, &stats);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->fn;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)  /* accommodate SACK */
		return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif

static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
};

void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}

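/*
 * Set up the fixed PCI-E memory access windows (MEMWIN0-2) through which
 * adapter memory can be read and written from the host.
 */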
static void setup_memwin(struct adapter *adap)
{
	u32 bar0;

	bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
		     (bar0 + MEMWIN0_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
		     (bar0 + MEMWIN1_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
		     (bar0 + MEMWIN2_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}

static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		unsigned int start, sz_kb;

		start = pci_resource_start(adap->pdev, 2) +
			OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
	}
}

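/*
 * Query and select device capabilities, configure global RSS and PF
 * resource limits, apply a few register tweaks, then issue the firmware
 * early-init command.
 */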
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST | FW_CMD_READ);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	/* select capabilities we'll be using */
	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
		return ret;
	}
	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->fn,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
	v = t4_read_reg(adap, TP_PIO_DATA);
	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
		     V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_HDR);
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_FIFO);
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_PCMD);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->fn);
}

/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL,
			 PKTSHIFT_MASK,
			 PKTSHIFT(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
			       CSUM_HAS_PSEUDO_HDR, 0);

	return 0;
}

/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret, using_flash;
	char *fw_config_file, fw_config_file_path[256];

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}

	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
	case CHELSIO_T4:
		fw_config_file = FW_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		using_flash = 1;
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		using_flash = 0;
		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->fn, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_write() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
				maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;

				ret = t4_memory_write(adapter, mtype, maddr,
						      size, data);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_write(adapter, mtype,
							      maddr + size,
							      4, &last.word);
				}
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_WRITE);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
	/*
	 * Return successfully and note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	adapter->flags |= USING_SOFT_PARAMS;
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
		 "Configuration File %s, version %#x, computed checksum %#x\n",
		 (using_flash
		  ? "in device FLASH"
		  : fw_config_file_path),
		 finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
			 -ret);
	return ret;
}

/*
 * Attempt to initialize the adapter via hard-coded, driver supplied
 * parameters ...
 */
static int adap_init0_no_config(struct adapter *adapter, int reset)
{
	struct sge *s = &adapter->sge;
	struct fw_caps_config_cmd caps_cmd;
	u32 v;
	int i, ret;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}

	/*
	 * Get device capabilities and select which we'll be using.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
		goto bye;
	}
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * Select RSS Global Mode we want to use.  We use "Basic Virtual"
	 * mode which maps each Virtual Interface to its own section of
	 * the RSS Table and we turn on all map and hash enables ...
	 */
	adapter->flags |= RSS_TNLALLLOOKUP;
	ret = t4_config_glbl_rss(adapter, adapter->mbox,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
				 ((adapter->flags & RSS_TNLALLLOOKUP) ?
					FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
	if (ret < 0)
		goto bye;

	/*
	 * Set up our own fundamental resource provisioning ...
	 */
	ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
			  PFRES_NEQ, PFRES_NETHCTRL,
			  PFRES_NIQFLINT, PFRES_NIQ,
			  PFRES_TC, PFRES_NVI,
			  FW_PFVF_CMD_CMASK_MASK,
			  pfvfres_pmask(adapter, adapter->fn, 0),
			  PFRES_NEXACTF,
			  PFRES_R_CAPS, PFRES_WX_CAPS);
	if (ret < 0)
		goto bye;

	/*
	 * Perform low level SGE initialization.  We need to do this before we
	 * send the firmware the INITIALIZE command because that will cause
	 * any other PF Drivers which are waiting for the Master
	 * Initialization to proceed forward.
	 */
	for (i = 0; i < SGE_NTIMERS - 1; i++)
		s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
	s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
	s->counter_val[0] = 1;
	for (i = 1; i < SGE_NCOUNTERS; i++)
		s->counter_val[i] = min(intr_cnt[i - 1],
					THRESHOLD_0_GET(THRESHOLD_0_MASK));
	t4_sge_init(adapter);

#ifdef CONFIG_PCI_IOV
	/*
	 * Provision resource limits for Virtual Functions.  We currently
	 * grant them all the same static resource limits except for the Port
	 * Access Rights Mask which we're assigning based on the PF.  All of
	 * the static provisioning stuff for both the PF and VF really needs
	 * to be managed in a persistent manner for each device which the
	 * firmware controls.
	 */
	{
		int pf, vf;
		int max_no_pf = is_t4(adapter->chip) ? NUM_OF_PF_WITH_SRIOV_T4 :
				NUM_OF_PF_WITH_SRIOV_T5;

		for (pf = 0; pf < max_no_pf; pf++) {
			if (num_vf[pf] <= 0)
				continue;

			/* VF numbering starts at 1! */
			for (vf = 1; vf <= num_vf[pf]; vf++) {
				ret = t4_cfg_pfvf(adapter, adapter->mbox,
						  pf, vf,
						  VFRES_NEQ, VFRES_NETHCTRL,
						  VFRES_NIQFLINT, VFRES_NIQ,
						  VFRES_TC, VFRES_NVI,
						  FW_PFVF_CMD_CMASK_MASK,
						  pfvfres_pmask(
						  adapter, pf, vf),
						  VFRES_NEXACTF,
						  VFRES_R_CAPS, VFRES_WX_CAPS);
				if (ret < 0)
					dev_warn(adapter->pdev_dev,
						 "failed to "\
						 "provision pf/vf=%d/%d; "
						 "err=%d\n", pf, vf, ret);
			}
		}
	}
#endif

	/*
	 * Set up the default filter mode.  Later we'll want to implement this
	 * via a firmware command, etc. ...  This needs to be done before the
	 * firmware initialization command ...  If the selected set of fields
	 * isn't equal to the default value, we'll need to make sure that the
	 * field selections will fit in the 36-bit budget.
	 */
	if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
		int j, bits = 0;

		for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
			switch (tp_vlan_pri_map & (1 << j)) {
			case 0:
				/* compressed filter field not enabled */
				break;
			case FCOE_MASK:
				bits += 1;
				break;
			case PORT_MASK:
				bits += 3;
				break;
			case VNIC_ID_MASK:
				bits += 17;
				break;
			case VLAN_MASK:
				bits += 17;
				break;
			case TOS_MASK:
				bits += 8;
				break;
			case PROTOCOL_MASK:
				bits += 8;
				break;
			case ETHERTYPE_MASK:
				bits += 16;
				break;
			case MACMATCH_MASK:
				bits += 9;
				break;
			case MPSHITTYPE_MASK:
				bits += 3;
				break;
			case FRAGMENTATION_MASK:
				bits += 1;
				break;
			}

		if (bits > 36) {
			dev_err(adapter->pdev_dev,
				"tp_vlan_pri_map=%#x needs %d bits > 36;"\
				" using %#x\n", tp_vlan_pri_map, bits,
				TP_VLAN_PRI_MAP_DEFAULT);
			tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
		}
	}
	v = tp_vlan_pri_map;
	t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, TP_VLAN_PRI_MAP);

	/*
	 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in
	 * order to support any of the compressed filter fields above.  Newer
	 * versions of the firmware do this automatically but it doesn't hurt
	 * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
	 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
	 * since the firmware automatically turns this on and off when we have
	 * a non-zero number of filters active (since it does have a
	 * performance impact).
	 */
	if (tp_vlan_pri_map)
		t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
				 FIVETUPLELOOKUP_MASK,
				 FIVETUPLELOOKUP_MASK);

	/*
	 * Tweak some settings.
	 */
	t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
		     RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
		     PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
		     KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));

	/*
	 * Get basic stuff going by issuing the Firmware Initialize command.
	 * Note that this _must_ be after all PFVF commands ...
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/*
	 * Return successfully!
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
		 "driver parameters\n");
	return 0;

	/*
	 * Something bad happened.  Return the error ...
	 */
bye:
	return ret;
}

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1, j;

	/*
	 * Contact FW, advertising Master capability (and potentially forcing
	 * ourselves as the Master PF if our module parameter force_init is
	 * set).
	 */
	ret = t4_fw_hello(adap, adap->mbox, adap->fn,
			  force_init ? MASTER_MUST : MASTER_MAY,
			  &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;
	if (force_init && state == DEV_STATE_INIT)
		state = DEV_STATE_UNINIT;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	ret = t4_check_fw_version(adap);
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		if (ret == -EINVAL || ret > 0) {
			if (upgrade_fw(adap) >= 0) {
				/*
				 * Note that the chip was reset as part of the
				 * firmware upgrade so we don't reset it again
				 * below and grab the new firmware version.
				 */
				reset = 0;
				ret = t4_check_fw_version(adap);
			}
		}
		if (ret < 0)
			return ret;
	}

	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/*
	 * If the firmware is initialized already (and we're not forcing a
	 * master initialization), note that we're living with existing
	 * adapter parameters.  Otherwise, it's time to try initializing the
	 * adapter ...
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "\
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
		adap->flags |= USING_SOFT_PARAMS;
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
			 "Initializing adapter\n");

		/*
		 * If the firmware doesn't support Configuration Files,
		 * warn the user (we fall back to hard-coded
		 * initialization below).
		 */
4746 if (ret < 0)
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004747 dev_warn(adap->pdev_dev, "Firmware doesn't support "
Vipul Pandya636f9d32012-09-26 02:39:39 +00004748 "configuration file.\n");
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004749 if (force_old_init)
4750 ret = adap_init0_no_config(adap, reset);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004751 else {
4752 /*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004753 * Find out whether we're dealing with a version of
4754 * the firmware which has configuration file support.
Vipul Pandya636f9d32012-09-26 02:39:39 +00004755 */
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004756 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4757 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4758 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
4759 params, val);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004760
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004761 /*
4762 * If the firmware doesn't support Configuration
4763 * Files, use the old Driver-based, hard-wired
4764 * initialization. Otherwise, try using the
4765 * Configuration File support and fall back to the
4766 * Driver-based initialization if there's no
4767 * Configuration File found.
4768 */
4769 if (ret < 0)
4770 ret = adap_init0_no_config(adap, reset);
4771 else {
4772 /*
4773 * The firmware provides us with a memory
4774 * buffer where we can load a Configuration
4775 * File from the host if we want to override
4776 * the Configuration File in flash.
4777 */
4778
4779 ret = adap_init0_config(adap, reset);
4780 if (ret == -ENOENT) {
4781 dev_info(adap->pdev_dev,
4782 "No Configuration File present "
4783 "on adapter. Using hard-wired "
4784 "configuration parameters.\n");
4785 ret = adap_init0_no_config(adap, reset);
4786 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00004787 }
4788 }
4789 if (ret < 0) {
4790 dev_err(adap->pdev_dev,
4791 "could not initialize adapter, error %d\n",
4792 -ret);
4793 goto bye;
4794 }
4795 }
4796
4797 /*
4798 * If we're living with non-hard-coded parameters (either from a
4799 * Firmware Configuration File or values programmed by a different PF
4800 * Driver), give the SGE code a chance to pull in anything that it
4801 * needs ... Note that this must be called after we retrieve our VPD
4802 * parameters in order to know how to convert core ticks to seconds.
4803 */
4804 if (adap->flags & USING_SOFT_PARAMS) {
4805 ret = t4_sge_init(adap);
4806 if (ret < 0)
4807 goto bye;
4808 }
4809
Vipul Pandya9a4da2c2012-10-19 02:09:53 +00004810 if (is_bypass_device(adap->pdev->device))
4811 adap->params.bypass = 1;
4812
Vipul Pandya636f9d32012-09-26 02:39:39 +00004813 /*
4814 * Grab some of our basic fundamental operating parameters.
4815 */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004816#define FW_PARAM_DEV(param) \
4817 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
Vipul Pandya636f9d32012-09-26 02:39:39 +00004818 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004819
4820#define FW_PARAM_PFVF(param) \
Vipul Pandya636f9d32012-09-26 02:39:39 +00004821 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
4822 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
4823 FW_PARAMS_PARAM_Y(0) | \
4824 FW_PARAMS_PARAM_Z(0)

	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
	/* If the firmware provides a non-empty active filter region, we can
	 * establish offload connections through firmware work requests
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Set up the server filter region. Divide the available
		 * filter region into two parts: regular filters get 1/3rd
		 * and server filters get the remaining 2/3rds. This split is
		 * only done when the FW_OFLD_CONN workaround path is enabled.
		 * 1. Regular filters.
		 * 2. Server filters: special filters which are used to
		 *    redirect SYN packets to the offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
					DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
					DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
					adap->tids.ftid_base;
		}
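		/*
		 * Worked example: with nftids = 192, sftid_base becomes
		 * ftid_base + 64, nsftids = 128, and the regular filter
		 * region shrinks to the first 64 IDs.
		 */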
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/*
	 * These are finalized by FW initialization, load their values now.
	 */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (j = 0; j < NCHAN; j++)
		adap->params.tp.tx_modq[j] = j;

	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			 &adap->filter_mode, 1,
			 TP_VLAN_PRI_MAP);

	adap->flags |= FW_OK;
	return 0;

	/*
	 * Something bad happened. If a command timed out or failed with
	 * EIO, the firmware is either not operating within its spec or
	 * something catastrophic happened to the HW/FW; in that case stop
	 * issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}

/* EEH callbacks */

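/*
 * A rough sketch of the recovery flow implemented below:
 * eeh_err_detected() quiesces the driver and disables the device,
 * eeh_slot_reset() re-enables PCI, renegotiates with the firmware and
 * reallocates per-port state, and eeh_resume() restarts the ports.
 */
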
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	pci_disable_device(pdev);
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

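	/* The reset has wiped out the adapter's virtual interfaces; allocate
	 * a fresh VI for each port and invalidate the cached exact-match MAC
	 * filter index so it gets reprogrammed on the next link_start().
	 */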
	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset = eeh_slot_reset,
	.resume = eeh_resume,
};

static inline bool is_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}

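/*
 * Fill in the common fields of an SGE response queue: interrupt holdoff
 * timer index, packet-count interrupt threshold (armed only when
 * pkt_cnt_idx is a valid counter index, i.e. below SGE_NCOUNTERS),
 * number of entries, and entry size.
 */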
static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs. Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();
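
	/*
	 * Example: a 2-port adapter with both ports at 10G starts with
	 * q10g = MAX_ETH_QSETS / 2 queue sets per port, which is then
	 * capped at netif_get_num_default_rss_queues(), typically the
	 * number of online CPUs limited to 8.
	 */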

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
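		/*
		 * Example: on a 16-CPU host with two ports, at least one of
		 * them 10G, this works out to min(ARRAY_SIZE(s->ofldrxq), 16)
		 * rounded up to a multiple of 2 offload queue sets.
		 */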
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, err, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

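	/*
	 * Ask for 'want' vectors but settle for fewer: when the full request
	 * cannot be met, pci_enable_msix() returns the number of vectors
	 * actually available, so retry with that count as long as it still
	 * covers our minimum requirement.
	 */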
	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;  /* round down */
		}
		for (i = 0; i < want; ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(adap->pdev_dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}

#undef EXTRA_VECS

static int init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

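		/*
		 * Allocate the port's RSS indirection table and fill it with
		 * the default round-robin mapping: entry j -> queue
		 * j % nqsets.
		 */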
		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}

static void print_port_info(const struct net_device *dev)
{
	static const char *base[] = {
		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
	};

	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";

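	/* Build a speed string like "100/1000/10GBASE-..." from the
	 * supported-speeds mask; the final sprintf() below backs up over
	 * the trailing '/' and overwrites it.
	 */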
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", base[pi->port_type]);

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, E/C: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.ec);
}

static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
#ifdef CONFIG_PCI_IOV
	int max_no_pf;
#endif

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
	if (func != ent->driver_data) {
		pci_save_state(pdev);        /* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar0;

	if (!is_t4(adapter->chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each write-combining segment is 128B in size. Write
		 * coalescing is enabled only when the number of egress
		 * queues per page for this PF (from
		 * SGE_EGRESS_QUEUES_PER_PAGE_PF) does not exceed the number
		 * of segments that fit in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_unmap_bar0;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_unmap_bar0;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	max_no_pf = is_t4(adapter->chip) ? NUM_OF_PF_WITH_SRIOV_T4 :
			NUM_OF_PF_WITH_SRIOV_T5;

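	/* Only the first few PFs can own VFs; instantiate them only when
	 * this PF is below the per-chip limit and the num_vf module
	 * parameter requested a non-zero count for it.
	 */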
	if (func < max_no_pf && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->chip))
		iounmap(adapter->bar2);
 out_unmap_bar0:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);

#endif

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->chip))
			iounmap(adapter->bar2);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	workq = create_singlethread_workqueue("cxgb4");
	if (!workq)
		return -ENOMEM;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
	flush_workqueue(workq);
	destroy_workqueue(workq);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);