/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#define DRV_VERSION "1.3.0-ko"
#define DRV_DESC "Chelsio T4 Network Driver"

/*
 * Max interrupt hold-off timer value in us. Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
	/*
	 * Physical Function provisioning constants.
	 */
	PFRES_NVI = 4,			/* # of Virtual Interfaces */
	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr */
	PFRES_NEQ = 256,		/* # of egress queues */
	PFRES_NIQ = 0,			/* # of ingress queues */
	PFRES_TC = 0,			/* PCI-E traffic class */
	PFRES_NEXACTF = 128,		/* # of exact MPS filters */

	PFRES_R_CAPS = FW_CMD_CAP_PF,
	PFRES_WX_CAPS = FW_CMD_CAP_PF,

#ifdef CONFIG_PCI_IOV
	/*
	 * Virtual Function provisioning constants. We need two extra Ingress
	 * Queues with Interrupt capability to serve as the VF's Firmware
	 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
	 * neither will have Free Lists associated with them. For each
	 * Ethernet/Control Egress Queue and for each Free List, we need an
	 * Egress Context.
	 */
	VFRES_NPORTS = 1,		/* # of "ports" per VF */
	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */

	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
	VFRES_TC = 0,			/* PCI-E traffic class */
	VFRES_NEXACTF = 16,		/* # of exact MPS filters */

	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
#endif
};

/*
 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
 * static and likely not to be useful in the long run. We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
{
	unsigned int portn, portvec;

	/*
	 * Give the PF access to all of the ports.
	 */
	if (vf == 0)
		return FW_PFVF_CMD_PMASK_MASK;

	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF. We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet so we have to compute this.
	 */
	if (adapter->params.nports == 0)
		return 0;

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;
	for (;;) {
		/*
		 * Isolate the lowest set bit in the port vector. If we're at
		 * the port number that we want, return that as the pmask;
		 * otherwise mask that bit out of the port vector and
		 * decrement our port number ...
		 */
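		/*
		 * Worked example (editor's illustration, not in the original
		 * source): with portvec = 0b1010 and portn = 1, the first
		 * iteration isolates pmask = 0b0010, clears that bit and
		 * decrements portn to 0; the second iteration then returns
		 * pmask = 0b1000.
		 */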
		unsigned int pmask = portvec ^ (portvec & (portvec-1));
		if (portn == 0)
			return pmask;
		portn--;
		portvec &= ~pmask;
	}
	/*NOTREACHED*/
}

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES = 128,
	MIN_FL_ENTRIES = 16
};

/* Host shadow copy of ingress filter entry. This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command. The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;		/* filter allocated and valid */
	u32 locked:1;		/* filter is administratively locked */

	u32 pending:1;		/* filter action is pending firmware reply */
	u32 smtidx:8;		/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	/* The filter itself. Most of this is a straight copy of information
	 * provided by the extended ioctl(). Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
	CH_DEVICE(0xa000, 0),	/* PE10K */
	CH_DEVICE(0x4001, -1),
	CH_DEVICE(0x4002, -1),
	CH_DEVICE(0x4003, -1),
	CH_DEVICE(0x4004, -1),
	CH_DEVICE(0x4005, -1),
	CH_DEVICE(0x4006, -1),
	CH_DEVICE(0x4007, -1),
	CH_DEVICE(0x4008, -1),
	CH_DEVICE(0x4009, -1),
	CH_DEVICE(0x400a, -1),
	CH_DEVICE(0x4401, 4),
	CH_DEVICE(0x4402, 4),
	CH_DEVICE(0x4403, 4),
	CH_DEVICE(0x4404, 4),
	CH_DEVICE(0x4405, 4),
	CH_DEVICE(0x4406, 4),
	CH_DEVICE(0x4407, 4),
	CH_DEVICE(0x4408, 4),
	CH_DEVICE(0x4409, 4),
	CH_DEVICE(0x440a, 4),
	CH_DEVICE(0x440d, 4),
	CH_DEVICE(0x440e, 4),
	{ 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"
#define FW_CFNAME "cxgb4/t4-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason. If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values. Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries. This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary. And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA. However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
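/*
 * (Editor's illustration, not in the original source: with the default
 * offset of 2, the 14-byte Ethernet header ends at buffer offset
 * 2 + 14 = 16, so the IP header that follows starts 4-byte aligned.)
 */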
static int rx_dma_offset = 2;

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

static unsigned int num_vf[4];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/*
 * The filter TCAM has a fixed portion and a variable portion. The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports. The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter. Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
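/*
 * (Editor's arithmetic check, not in the original source: the default
 * selection above uses 1 + 3 + 8 + 17 + 3 + 1 = 33 bits, fitting the
 * 36-bit compressed-header budget with 3 bits to spare.)
 */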
enum {
	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
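
/*
 * (Editor's illustration, inferred from the "10 == 640" note above and not
 * in the original source: the threshold appears to be programmed in units
 * of 64 doorbell FIFO entries, so the default of 10 corresponds to the
 * 640-entry mark.)
 */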

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

static struct workqueue_struct *workq;

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
	return ret;
}

/* Clear a filter and release any of its resources that we own. This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter has loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule. The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = GET_TCB_COOKIE(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong. Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++; /* skip RSS header */
	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	return 0;
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD. All processing is done by
 *	the ULD; we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	if (v & PFSW) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}

/**
 *	write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1; /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
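		/* Editor's note (inference from the line below, not in the
		 * original source): a negative msi_idx signals "no dedicated
		 * MSI-X vector"; encoding it as -(abs_id + 1) keeps the value
		 * strictly negative even when the forwarded-interrupt queue's
		 * abs_id is 0, so the queue ID can be recovered unambiguously
		 * as -msi_idx - 1.
		 */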
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}

/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	u32 vers;
	const struct fw_hdr *hdr;
	const struct firmware *fw;
	struct device *dev = adap->pdev_dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "unable to load firmware image " FW_FNAME
			", error %d\n", ret);
		return ret;
	}

	hdr = (const struct fw_hdr *)fw->data;
	vers = ntohl(hdr->fw_ver);
	if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
		ret = -EINVAL; /* wrong major version, won't do */
		goto out;
	}

	/*
	 * If the flash FW is unusable or we found something newer, load it.
	 */
	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
	    vers > adap->params.fw_vers) {
		dev_info(dev, "upgrading firmware ...\n");
		ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
				    /*force=*/false);
		if (!ret)
			dev_info(dev, "firmware successfully upgraded to "
				 FW_FNAME " (%d.%d.%d.%d)\n",
				 FW_HDR_FW_VER_MAJOR_GET(vers),
				 FW_HDR_FW_VER_MINOR_GET(vers),
				 FW_HDR_FW_VER_MICRO_GET(vers),
				 FW_HDR_FW_VER_BUILD_GET(vers));
		else
			dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
	} else {
		/*
		 * Tell our caller that we didn't upgrade the firmware.
		 */
		ret = -EINVAL;
	}

out:	release_firmware(fw);
	return ret;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

/* Send a Work Request to write the filter at a specified index. We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL)
			return -EAGAIN;
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code. We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(V_FW_FILTER_WR_TID(ftid) |
		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		      V_FW_FILTER_WR_NOREPLY(0) |
		      V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(V_FW_FILTER_WR_RX_CHAN(0) |
		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}
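
/* (Editor's note, summarizing the flow visible in this file and not part
 * of the original source: set_filter_wr() and del_filter_wr() mark a
 * filter "pending" and post a Work Request; the firmware's CPL_SET_TCB_RPL
 * reply is routed by fwevtq_handler() to filter_rpl(), which clears the
 * pending flag on success or the whole entry on deletion/failure.)
 */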

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK ",
	"TxFramesOK ",
	"TxBroadcastFrames ",
	"TxMulticastFrames ",
	"TxUnicastFrames ",
	"TxErrorFrames ",

	"TxFrames64 ",
	"TxFrames65To127 ",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",

	"TxFramesDropped ",
	"TxPauseFrames ",
	"TxPPP0Frames ",
	"TxPPP1Frames ",
	"TxPPP2Frames ",
	"TxPPP3Frames ",
	"TxPPP4Frames ",
	"TxPPP5Frames ",
	"TxPPP6Frames ",
	"TxPPP7Frames ",

	"RxOctetsOK ",
	"RxFramesOK ",
	"RxBroadcastFrames ",
	"RxMulticastFrames ",
	"RxUnicastFrames ",

	"RxFramesTooLong ",
	"RxJabberErrors ",
	"RxFCSErrors ",
	"RxLengthErrors ",
	"RxSymbolErrors ",
	"RxRuntFrames ",

	"RxFrames64 ",
	"RxFrames65To127 ",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",

	"RxPauseFrames ",
	"RxPPP0Frames ",
	"RxPPP1Frames ",
	"RxPPP2Frames ",
	"RxPPP3Frames ",
	"RxPPP4Frames ",
	"RxPPP5Frames ",
	"RxPPP6Frames ",
	"RxPPP7Frames ",

	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc ",
	"RxBG1FramesTrunc ",
	"RxBG2FramesTrunc ",
	"RxBG3FramesTrunc ",

	"TSO ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"VLANextractions ",
	"VLANinsertions ",
	"GROpackets ",
	"GROmerged ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T4_REGMAP_SIZE (160 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T4_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port. They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}

/*
 * Return a version number to identify the type of adapter. The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
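/*
 * (Editor's illustration, not in the original source: a T4 rev 2 adapter
 * would report 4 | (2 << 10) | (1 << 16) = 0x10804.)
 */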
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return 4 | (ap->params.rev << 10) | (1 << 16);
}

static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};

	int i;
	struct adapter *ap = netdev2adap(dev);

	regs->version = mk_adap_vers(ap);

	memset(buf, 0, T4_REGMAP_SIZE);
	for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
1660
1661static int restart_autoneg(struct net_device *dev)
1662{
1663 struct port_info *p = netdev_priv(dev);
1664
1665 if (!netif_running(dev))
1666 return -EAGAIN;
1667 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1668 return -EINVAL;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00001669 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001670 return 0;
1671}
1672
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07001673static int identify_port(struct net_device *dev,
1674 enum ethtool_phys_id_state state)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001675{
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07001676 unsigned int val;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00001677 struct adapter *adap = netdev2adap(dev);
1678
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07001679 if (state == ETHTOOL_ID_ACTIVE)
1680 val = 0xffff;
1681 else if (state == ETHTOOL_ID_INACTIVE)
1682 val = 0;
1683 else
1684 return -EINVAL;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001685
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07001686 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001687}
1688
1689static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1690{
1691 unsigned int v = 0;
1692
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00001693 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1694 type == FW_PORT_TYPE_BT_XAUI) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001695 v |= SUPPORTED_TP;
1696 if (caps & FW_PORT_CAP_SPEED_100M)
1697 v |= SUPPORTED_100baseT_Full;
1698 if (caps & FW_PORT_CAP_SPEED_1G)
1699 v |= SUPPORTED_1000baseT_Full;
1700 if (caps & FW_PORT_CAP_SPEED_10G)
1701 v |= SUPPORTED_10000baseT_Full;
1702 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1703 v |= SUPPORTED_Backplane;
1704 if (caps & FW_PORT_CAP_SPEED_1G)
1705 v |= SUPPORTED_1000baseKX_Full;
1706 if (caps & FW_PORT_CAP_SPEED_10G)
1707 v |= SUPPORTED_10000baseKX4_Full;
1708 } else if (type == FW_PORT_TYPE_KR)
1709 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00001710 else if (type == FW_PORT_TYPE_BP_AP)
Dimitris Michailidis7d5e77a2010-12-14 21:36:47 +00001711 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1712 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
1713 else if (type == FW_PORT_TYPE_BP4_AP)
1714 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1715 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
1716 SUPPORTED_10000baseKX4_Full;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00001717 else if (type == FW_PORT_TYPE_FIBER_XFI ||
1718 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001719 v |= SUPPORTED_FIBRE;
1720
1721 if (caps & FW_PORT_CAP_ANEG)
1722 v |= SUPPORTED_Autoneg;
1723 return v;
1724}
1725
1726static unsigned int to_fw_linkcaps(unsigned int caps)
1727{
1728 unsigned int v = 0;
1729
1730 if (caps & ADVERTISED_100baseT_Full)
1731 v |= FW_PORT_CAP_SPEED_100M;
1732 if (caps & ADVERTISED_1000baseT_Full)
1733 v |= FW_PORT_CAP_SPEED_1G;
1734 if (caps & ADVERTISED_10000baseT_Full)
1735 v |= FW_PORT_CAP_SPEED_10G;
1736 return v;
1737}
1738
1739static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1740{
1741 const struct port_info *p = netdev_priv(dev);
1742
1743 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00001744 p->port_type == FW_PORT_TYPE_BT_XFI ||
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001745 p->port_type == FW_PORT_TYPE_BT_XAUI)
1746 cmd->port = PORT_TP;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00001747 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1748 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001749 cmd->port = PORT_FIBRE;
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00001750 else if (p->port_type == FW_PORT_TYPE_SFP) {
1751 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1752 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1753 cmd->port = PORT_DA;
1754 else
1755 cmd->port = PORT_FIBRE;
1756 } else
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001757 cmd->port = PORT_OTHER;
1758
1759 if (p->mdio_addr >= 0) {
1760 cmd->phy_address = p->mdio_addr;
1761 cmd->transceiver = XCVR_EXTERNAL;
1762 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1763 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1764 } else {
1765 cmd->phy_address = 0; /* not really, but no better option */
1766 cmd->transceiver = XCVR_INTERNAL;
1767 cmd->mdio_support = 0;
1768 }
1769
1770 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1771 cmd->advertising = from_fw_linkcaps(p->port_type,
1772 p->link_cfg.advertising);
David Decotigny70739492011-04-27 18:32:40 +00001773 ethtool_cmd_speed_set(cmd,
1774 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001775 cmd->duplex = DUPLEX_FULL;
1776 cmd->autoneg = p->link_cfg.autoneg;
1777 cmd->maxtxpkt = 0;
1778 cmd->maxrxpkt = 0;
1779 return 0;
1780}
1781
1782static unsigned int speed_to_caps(int speed)
1783{
1784 if (speed == SPEED_100)
1785 return FW_PORT_CAP_SPEED_100M;
1786 if (speed == SPEED_1000)
1787 return FW_PORT_CAP_SPEED_1G;
1788 if (speed == SPEED_10000)
1789 return FW_PORT_CAP_SPEED_10G;
1790 return 0;
1791}
1792
1793static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1794{
1795 unsigned int cap;
1796 struct port_info *p = netdev_priv(dev);
1797 struct link_config *lc = &p->link_cfg;
David Decotigny25db0332011-04-27 18:32:39 +00001798 u32 speed = ethtool_cmd_speed(cmd);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001799
1800 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
1801 return -EINVAL;
1802
1803 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1804 /*
1805 * PHY offers a single speed. See if that's what's
1806 * being requested.
1807 */
1808 if (cmd->autoneg == AUTONEG_DISABLE &&
David Decotigny25db0332011-04-27 18:32:39 +00001809 (lc->supported & speed_to_caps(speed)))
1810 return 0;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001811 return -EINVAL;
1812 }
1813
1814 if (cmd->autoneg == AUTONEG_DISABLE) {
David Decotigny25db0332011-04-27 18:32:39 +00001815 cap = speed_to_caps(speed);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001816
David Decotigny25db0332011-04-27 18:32:39 +00001817 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
1818 (speed == SPEED_10000))
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001819 return -EINVAL;
1820 lc->requested_speed = cap;
1821 lc->advertising = 0;
1822 } else {
1823 cap = to_fw_linkcaps(cmd->advertising);
1824 if (!(lc->supported & cap))
1825 return -EINVAL;
1826 lc->requested_speed = 0;
1827 lc->advertising = cap | FW_PORT_CAP_ANEG;
1828 }
1829 lc->autoneg = cmd->autoneg;
1830
1831 if (netif_running(dev))
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00001832 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1833 lc);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001834 return 0;
1835}
1836
1837static void get_pauseparam(struct net_device *dev,
1838 struct ethtool_pauseparam *epause)
1839{
1840 struct port_info *p = netdev_priv(dev);
1841
1842 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1843 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1844 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1845}
1846
1847static int set_pauseparam(struct net_device *dev,
1848 struct ethtool_pauseparam *epause)
1849{
1850 struct port_info *p = netdev_priv(dev);
1851 struct link_config *lc = &p->link_cfg;
1852
1853 if (epause->autoneg == AUTONEG_DISABLE)
1854 lc->requested_fc = 0;
1855 else if (lc->supported & FW_PORT_CAP_ANEG)
1856 lc->requested_fc = PAUSE_AUTONEG;
1857 else
1858 return -EINVAL;
1859
1860 if (epause->rx_pause)
1861 lc->requested_fc |= PAUSE_RX;
1862 if (epause->tx_pause)
1863 lc->requested_fc |= PAUSE_TX;
1864 if (netif_running(dev))
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00001865 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1866 lc);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001867 return 0;
1868}
1869
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001870static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1871{
1872 const struct port_info *pi = netdev_priv(dev);
1873 const struct sge *s = &pi->adapter->sge;
1874
1875 e->rx_max_pending = MAX_RX_BUFFERS;
1876 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1877 e->rx_jumbo_max_pending = 0;
1878 e->tx_max_pending = MAX_TXQ_ENTRIES;
1879
1880 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1881 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1882 e->rx_jumbo_pending = 0;
1883 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1884}
1885
1886static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1887{
1888 int i;
1889 const struct port_info *pi = netdev_priv(dev);
1890 struct adapter *adapter = pi->adapter;
1891 struct sge *s = &adapter->sge;
1892
1893 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1894 e->tx_pending > MAX_TXQ_ENTRIES ||
1895 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1896 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1897 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1898 return -EINVAL;
1899
1900 if (adapter->flags & FULL_INIT_DONE)
1901 return -EBUSY;
1902
1903 for (i = 0; i < pi->nqsets; ++i) {
1904 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1905 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1906 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1907 }
1908 return 0;
1909}
1910
1911static int closest_timer(const struct sge *s, int time)
1912{
1913 int i, delta, match = 0, min_delta = INT_MAX;
1914
1915 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1916 delta = time - s->timer_val[i];
1917 if (delta < 0)
1918 delta = -delta;
1919 if (delta < min_delta) {
1920 min_delta = delta;
1921 match = i;
1922 }
1923 }
1924 return match;
1925}
1926
1927static int closest_thres(const struct sge *s, int thres)
1928{
1929 int i, delta, match = 0, min_delta = INT_MAX;
1930
1931 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1932 delta = thres - s->counter_val[i];
1933 if (delta < 0)
1934 delta = -delta;
1935 if (delta < min_delta) {
1936 min_delta = delta;
1937 match = i;
1938 }
1939 }
1940 return match;
1941}
1942
1943/*
1944 * Return a queue's interrupt hold-off time in us. 0 means no timer.
1945 */
1946static unsigned int qtimer_val(const struct adapter *adap,
1947 const struct sge_rspq *q)
1948{
1949 unsigned int idx = q->intr_params >> 1;
1950
1951 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1952}
1953
1954/**
1955 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1956 * @adap: the adapter
1957 * @q: the Rx queue
1958 * @us: the hold-off time in us, or 0 to disable timer
1959 * @cnt: the hold-off packet count, or 0 to disable counter
1960 *
1961 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1962 * one of the two needs to be enabled for the queue to generate interrupts.
1963 */
1964static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1965 unsigned int us, unsigned int cnt)
1966{
1967 if ((us | cnt) == 0)
1968 cnt = 1;
1969
1970 if (cnt) {
1971 int err;
1972 u32 v, new_idx;
1973
1974 new_idx = closest_thres(&adap->sge, cnt);
1975 if (q->desc && q->pktcnt_idx != new_idx) {
1976 /* the queue has already been created, update it */
1977 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1978 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1979 FW_PARAMS_PARAM_YZ(q->cntxt_id);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00001980 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
1981 &new_idx);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00001982 if (err)
1983 return err;
1984 }
1985 q->pktcnt_idx = new_idx;
1986 }
1987
1988 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1989 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1990 return 0;
1991}
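
/*
 * Illustrative sketch (not driver code): how the two hold-off knobs
 * collapse into the intr_params encoding that qtimer_val() decodes above.
 * Assuming QINTR_TIMER_IDX(x) places x at bits 1 and up and QINTR_CNT_EN
 * is bit 0 (which matches the ">> 1" decode), a request of us = 50 and
 * cnt = 32 against timer values {5, 10, 20, 50, 100, 200} selects timer
 * index 3 plus the closest count threshold:
 *
 *	q->intr_params = QINTR_TIMER_IDX(3) | QINTR_CNT_EN;
 */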
1992
1993static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1994{
1995 const struct port_info *pi = netdev_priv(dev);
1996 struct adapter *adap = pi->adapter;
1997
1998 return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1999 c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
2000}
2001
2002static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2003{
2004 const struct port_info *pi = netdev_priv(dev);
2005 const struct adapter *adap = pi->adapter;
2006 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2007
2008 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2009 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2010 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2011 return 0;
2012}
2013
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002014/**
2015 * eeprom_ptov - translate a physical EEPROM address to virtual
2016 * @phys_addr: the physical EEPROM address
2017 * @fn: the PCI function number
2018 * @sz: size of function-specific area
2019 *
2020 * Translate a physical EEPROM address to virtual. The first 1K is
2021 * accessed through virtual addresses starting at 31K, the rest is
2022 * accessed through virtual addresses starting at 0.
2023 *
2024 * The mapping is as follows:
2025 * [0..1K) -> [31K..32K)
2026 * [1K..1K+A) -> [31K-A..31K)
2027 * [1K+A..ES) -> [0..ES-A-1K)
2028 *
2029 * where A = @fn * @sz, and ES = EEPROM size.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002030 */
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002031static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002032{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002033 fn *= sz;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002034 if (phys_addr < 1024)
2035 return phys_addr + (31 << 10);
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002036 if (phys_addr < 1024 + fn)
2037 return 31744 - fn + phys_addr - 1024;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002038 if (phys_addr < EEPROMSIZE)
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002039 return phys_addr - 1024 - fn;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002040 return -EINVAL;
2041}
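
/*
 * Worked example of the mapping above (illustrative): with
 * @sz = EEPROMPFSIZE = 1024 and @fn = 2 we get A = 2048, so
 *
 *	eeprom_ptov(0, 2, 1024)    == 31744	first 1K -> [31K..32K)
 *	eeprom_ptov(1024, 2, 1024) == 29696	PF area  -> [31K-A..31K)
 *	eeprom_ptov(3072, 2, 1024) == 0		the rest -> [0..ES-A-1K)
 */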
2042
2043/*
2044 * The next two routines implement eeprom read/write from physical addresses.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002045 */
2046static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2047{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002048 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002049
2050 if (vaddr >= 0)
2051 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2052 return vaddr < 0 ? vaddr : 0;
2053}
2054
2055static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2056{
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002057 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002058
2059 if (vaddr >= 0)
2060 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2061 return vaddr < 0 ? vaddr : 0;
2062}
2063
2064#define EEPROM_MAGIC 0x38E2F10C
2065
2066static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2067 u8 *data)
2068{
2069 int i, err = 0;
2070 struct adapter *adapter = netdev2adap(dev);
2071
2072 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2073 if (!buf)
2074 return -ENOMEM;
2075
2076 e->magic = EEPROM_MAGIC;
2077 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2078 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2079
2080 if (!err)
2081 memcpy(data, buf + e->offset, e->len);
2082 kfree(buf);
2083 return err;
2084}
2085
2086static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2087 u8 *data)
2088{
2089 u8 *buf;
2090 int err = 0;
2091 u32 aligned_offset, aligned_len, *p;
2092 struct adapter *adapter = netdev2adap(dev);
2093
2094 if (eeprom->magic != EEPROM_MAGIC)
2095 return -EINVAL;
2096
2097 aligned_offset = eeprom->offset & ~3;
2098 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2099
Dimitris Michailidis1478b3e2010-08-23 17:20:59 +00002100 if (adapter->fn > 0) {
2101 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2102
2103 if (aligned_offset < start ||
2104 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2105 return -EPERM;
2106 }
2107
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002108 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2109 /*
2110 * RMW possibly needed for first or last words.
2111 */
2112 buf = kmalloc(aligned_len, GFP_KERNEL);
2113 if (!buf)
2114 return -ENOMEM;
2115 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2116 if (!err && aligned_len > 4)
2117 err = eeprom_rd_phys(adapter,
2118 aligned_offset + aligned_len - 4,
2119 (u32 *)&buf[aligned_len - 4]);
2120 if (err)
2121 goto out;
2122 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2123 } else
2124 buf = data;
2125
2126 err = t4_seeprom_wp(adapter, false);
2127 if (err)
2128 goto out;
2129
2130 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2131 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2132 aligned_offset += 4;
2133 }
2134
2135 if (!err)
2136 err = t4_seeprom_wp(adapter, true);
2137out:
2138 if (buf != data)
2139 kfree(buf);
2140 return err;
2141}
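
/*
 * Example of the alignment arithmetic above (illustrative): a write of
 * len = 6 at offset = 5 yields aligned_offset = 4 and
 * aligned_len = (6 + 1 + 3) & ~3 = 8, i.e. two 32-bit words.  The first
 * and last words are read back beforehand so the bytes outside [5, 11)
 * are rewritten with their original values.
 */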
2142
2143static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2144{
2145 int ret;
2146 const struct firmware *fw;
2147 struct adapter *adap = netdev2adap(netdev);
2148
2149 ef->data[sizeof(ef->data) - 1] = '\0';
2150 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2151 if (ret < 0)
2152 return ret;
2153
2154 ret = t4_load_fw(adap, fw->data, fw->size);
2155 release_firmware(fw);
2156 if (!ret)
2157 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2158 return ret;
2159}
2160
2161#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2162#define BCAST_CRC 0xa0ccc1a6
2163
2164static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2165{
2166 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2167 wol->wolopts = netdev2adap(dev)->wol;
2168 memset(&wol->sopass, 0, sizeof(wol->sopass));
2169}
2170
2171static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2172{
2173 int err = 0;
2174 struct port_info *pi = netdev_priv(dev);
2175
2176 if (wol->wolopts & ~WOL_SUPPORTED)
2177 return -EINVAL;
2178 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2179 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2180 if (wol->wolopts & WAKE_BCAST) {
2181 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2182 ~0ULL, 0, false);
2183 if (!err)
2184 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2185 ~6ULL, ~0ULL, BCAST_CRC, true);
2186 } else
2187 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2188 return err;
2189}
2190
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002191static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002192{
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00002193 const struct port_info *pi = netdev_priv(dev);
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002194 netdev_features_t changed = dev->features ^ features;
Dimitris Michailidis19ecae22010-10-21 11:29:56 +00002195 int err;
Dimitris Michailidis19ecae22010-10-21 11:29:56 +00002196
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00002197 if (!(changed & NETIF_F_HW_VLAN_RX))
2198 return 0;
Dimitris Michailidis19ecae22010-10-21 11:29:56 +00002199
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00002200 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2201 -1, -1, -1,
2202 !!(features & NETIF_F_HW_VLAN_RX), true);
2203 if (unlikely(err))
2204 dev->features = features ^ NETIF_F_HW_VLAN_RX;
Dimitris Michailidis19ecae22010-10-21 11:29:56 +00002205 return err;
Dimitris Michailidis87b6cf52010-04-27 16:22:42 -07002206}
2207
Ben Hutchings7850f632011-12-15 13:55:01 +00002208static u32 get_rss_table_size(struct net_device *dev)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002209{
2210 const struct port_info *pi = netdev_priv(dev);
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002211
Ben Hutchings7850f632011-12-15 13:55:01 +00002212 return pi->rss_size;
2213}
2214
2215static int get_rss_table(struct net_device *dev, u32 *p)
2216{
2217 const struct port_info *pi = netdev_priv(dev);
2218 unsigned int n = pi->rss_size;
2219
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002220 while (n--)
Ben Hutchings7850f632011-12-15 13:55:01 +00002221 p[n] = pi->rss[n];
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002222 return 0;
2223}
2224
Ben Hutchings7850f632011-12-15 13:55:01 +00002225static int set_rss_table(struct net_device *dev, const u32 *p)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002226{
2227 unsigned int i;
2228 struct port_info *pi = netdev_priv(dev);
2229
Ben Hutchings7850f632011-12-15 13:55:01 +00002230 for (i = 0; i < pi->rss_size; i++)
2231 pi->rss[i] = p[i];
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002232 if (pi->adapter->flags & FULL_INIT_DONE)
2233 return write_rss(pi, pi->rss);
2234 return 0;
2235}
2236
2237static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
Ben Hutchings815c7db2011-09-06 13:49:12 +00002238 u32 *rules)
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002239{
Dimitris Michailidisf7965642010-07-11 12:01:18 +00002240 const struct port_info *pi = netdev_priv(dev);
2241
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002242 switch (info->cmd) {
Dimitris Michailidisf7965642010-07-11 12:01:18 +00002243 case ETHTOOL_GRXFH: {
2244 unsigned int v = pi->rss_mode;
2245
2246 info->data = 0;
2247 switch (info->flow_type) {
2248 case TCP_V4_FLOW:
2249 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2250 info->data = RXH_IP_SRC | RXH_IP_DST |
2251 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2252 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2253 info->data = RXH_IP_SRC | RXH_IP_DST;
2254 break;
2255 case UDP_V4_FLOW:
2256 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2257 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2258 info->data = RXH_IP_SRC | RXH_IP_DST |
2259 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2260 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2261 info->data = RXH_IP_SRC | RXH_IP_DST;
2262 break;
2263 case SCTP_V4_FLOW:
2264 case AH_ESP_V4_FLOW:
2265 case IPV4_FLOW:
2266 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2267 info->data = RXH_IP_SRC | RXH_IP_DST;
2268 break;
2269 case TCP_V6_FLOW:
2270 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2271 info->data = RXH_IP_SRC | RXH_IP_DST |
2272 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2273 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2274 info->data = RXH_IP_SRC | RXH_IP_DST;
2275 break;
2276 case UDP_V6_FLOW:
2277 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2278 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2279 info->data = RXH_IP_SRC | RXH_IP_DST |
2280 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2281 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2282 info->data = RXH_IP_SRC | RXH_IP_DST;
2283 break;
2284 case SCTP_V6_FLOW:
2285 case AH_ESP_V6_FLOW:
2286 case IPV6_FLOW:
2287 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2288 info->data = RXH_IP_SRC | RXH_IP_DST;
2289 break;
2290 }
2291 return 0;
2292 }
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002293 case ETHTOOL_GRXRINGS:
Dimitris Michailidisf7965642010-07-11 12:01:18 +00002294 info->data = pi->nqsets;
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002295 return 0;
2296 }
2297 return -EOPNOTSUPP;
2298}
2299
stephen hemminger9b07be42012-01-04 12:59:49 +00002300static const struct ethtool_ops cxgb_ethtool_ops = {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002301 .get_settings = get_settings,
2302 .set_settings = set_settings,
2303 .get_drvinfo = get_drvinfo,
2304 .get_msglevel = get_msglevel,
2305 .set_msglevel = set_msglevel,
2306 .get_ringparam = get_sge_param,
2307 .set_ringparam = set_sge_param,
2308 .get_coalesce = get_coalesce,
2309 .set_coalesce = set_coalesce,
2310 .get_eeprom_len = get_eeprom_len,
2311 .get_eeprom = get_eeprom,
2312 .set_eeprom = set_eeprom,
2313 .get_pauseparam = get_pauseparam,
2314 .set_pauseparam = set_pauseparam,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002315 .get_link = ethtool_op_get_link,
2316 .get_strings = get_strings,
Dimitris Michailidisc5e06362011-04-08 13:06:25 -07002317 .set_phys_id = identify_port,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002318 .nway_reset = restart_autoneg,
2319 .get_sset_count = get_sset_count,
2320 .get_ethtool_stats = get_stats,
2321 .get_regs_len = get_regs_len,
2322 .get_regs = get_regs,
2323 .get_wol = get_wol,
2324 .set_wol = set_wol,
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002325 .get_rxnfc = get_rxnfc,
Ben Hutchings7850f632011-12-15 13:55:01 +00002326 .get_rxfh_indir_size = get_rss_table_size,
Dimitris Michailidis671b0062010-07-11 12:01:17 +00002327 .get_rxfh_indir = get_rss_table,
2328 .set_rxfh_indir = set_rss_table,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002329 .flash_device = set_flash,
2330};
2331
2332/*
2333 * debugfs support
2334 */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002335static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2336 loff_t *ppos)
2337{
2338 loff_t pos = *ppos;
Al Viro496ad9a2013-01-23 17:07:38 -05002339 loff_t avail = file_inode(file)->i_size;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002340 unsigned int mem = (uintptr_t)file->private_data & 3;
2341 struct adapter *adap = file->private_data - mem;
2342
2343 if (pos < 0)
2344 return -EINVAL;
2345 if (pos >= avail)
2346 return 0;
2347 if (count > avail - pos)
2348 count = avail - pos;
2349
2350 while (count) {
2351 size_t len;
2352 int ret, ofst;
2353 __be32 data[16];
2354
2355 if (mem == MEM_MC)
2356 ret = t4_mc_read(adap, pos, data, NULL);
2357 else
2358 ret = t4_edc_read(adap, mem, pos, data, NULL);
2359 if (ret)
2360 return ret;
2361
2362 ofst = pos % sizeof(data);
2363 len = min(count, sizeof(data) - ofst);
2364 if (copy_to_user(buf, (u8 *)data + ofst, len))
2365 return -EFAULT;
2366
2367 buf += len;
2368 pos += len;
2369 count -= len;
2370 }
2371 count = pos - *ppos;
2372 *ppos = pos;
2373 return count;
2374}
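
/*
 * Note on the loop above: memory is fetched in 64-byte chunks
 * (sizeof(data)) and unaligned positions are handled within each chunk.
 * For example, a read starting at pos = 100 computes
 * ofst = 100 % 64 = 36 and copies at most 64 - 36 = 28 bytes before
 * advancing to the next chunk boundary.
 */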
2375
2376static const struct file_operations mem_debugfs_fops = {
2377 .owner = THIS_MODULE,
Stephen Boyd234e3402012-04-05 14:25:11 -07002378 .open = simple_open,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002379 .read = mem_read,
Arnd Bergmann6038f372010-08-15 18:52:59 +02002380 .llseek = default_llseek,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002381};
2382
Bill Pemberton91744942012-12-03 09:23:02 -05002383static void add_debugfs_mem(struct adapter *adap, const char *name,
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00002384 unsigned int idx, unsigned int size_mb)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002385{
2386 struct dentry *de;
2387
2388 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2389 (void *)adap + idx, &mem_debugfs_fops);
2390 if (de && de->d_inode)
2391 de->d_inode->i_size = size_mb << 20;
2392}
2393
Bill Pemberton91744942012-12-03 09:23:02 -05002394static int setup_debugfs(struct adapter *adap)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002395{
2396 int i;
2397
2398 if (IS_ERR_OR_NULL(adap->debugfs_root))
2399 return -1;
2400
2401 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2402 if (i & EDRAM0_ENABLE)
2403 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2404 if (i & EDRAM1_ENABLE)
2405 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2406 if (i & EXT_MEM_ENABLE)
2407 add_debugfs_mem(adap, "mc", MEM_MC,
2408 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2409 if (adap->l2t)
2410 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2411 &t4_l2t_fops);
2412 return 0;
2413}
2414
2415/*
2416 * upper-layer driver support
2417 */
2418
2419/*
2420 * Allocate an active-open TID and set it to the supplied value.
2421 */
2422int cxgb4_alloc_atid(struct tid_info *t, void *data)
2423{
2424 int atid = -1;
2425
2426 spin_lock_bh(&t->atid_lock);
2427 if (t->afree) {
2428 union aopen_entry *p = t->afree;
2429
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002430 atid = (p - t->atid_tab) + t->atid_base;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002431 t->afree = p->next;
2432 p->data = data;
2433 t->atids_in_use++;
2434 }
2435 spin_unlock_bh(&t->atid_lock);
2436 return atid;
2437}
2438EXPORT_SYMBOL(cxgb4_alloc_atid);
2439
2440/*
2441 * Release an active-open TID.
2442 */
2443void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2444{
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002445 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002446
2447 spin_lock_bh(&t->atid_lock);
2448 p->next = t->afree;
2449 t->afree = p;
2450 t->atids_in_use--;
2451 spin_unlock_bh(&t->atid_lock);
2452}
2453EXPORT_SYMBOL(cxgb4_free_atid);
2454
2455/*
2456 * Allocate a server TID and set it to the supplied value.
2457 */
2458int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2459{
2460 int stid;
2461
2462 spin_lock_bh(&t->stid_lock);
2463 if (family == PF_INET) {
2464 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2465 if (stid < t->nstids)
2466 __set_bit(stid, t->stid_bmap);
2467 else
2468 stid = -1;
2469 } else {
2470 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2471 if (stid < 0)
2472 stid = -1;
2473 }
2474 if (stid >= 0) {
2475 t->stid_tab[stid].data = data;
2476 stid += t->stid_base;
2477 t->stids_in_use++;
2478 }
2479 spin_unlock_bh(&t->stid_lock);
2480 return stid;
2481}
2482EXPORT_SYMBOL(cxgb4_alloc_stid);
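
/*
 * Note on the allocation above: an IPv4 server consumes a single stid,
 * while the IPv6 branch calls bitmap_find_free_region(..., 2), i.e. it
 * claims an order-2 region of four consecutive stids, presumably because
 * a 128-bit IPv6 address needs four times the server-table space of an
 * IPv4 one.  cxgb4_free_stid() releases the same order-2 region.
 */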
2483
Vipul Pandyadca4fae2012-12-10 09:30:53 +00002484/* Allocate a server filter TID and set it to the supplied value.
2485 */
2486int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
2487{
2488 int stid;
2489
2490 spin_lock_bh(&t->stid_lock);
2491 if (family == PF_INET) {
2492 stid = find_next_zero_bit(t->stid_bmap,
2493 t->nstids + t->nsftids, t->nstids);
2494 if (stid < (t->nstids + t->nsftids))
2495 __set_bit(stid, t->stid_bmap);
2496 else
2497 stid = -1;
2498 } else {
2499 stid = -1;
2500 }
2501 if (stid >= 0) {
2502 t->stid_tab[stid].data = data;
2503 stid += t->stid_base;
2504 t->stids_in_use++;
2505 }
2506 spin_unlock_bh(&t->stid_lock);
2507 return stid;
2508}
2509EXPORT_SYMBOL(cxgb4_alloc_sftid);
2510
2511/* Release a server TID.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002512 */
2513void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2514{
2515 stid -= t->stid_base;
2516 spin_lock_bh(&t->stid_lock);
2517 if (family == PF_INET)
2518 __clear_bit(stid, t->stid_bmap);
2519 else
2520 bitmap_release_region(t->stid_bmap, stid, 2);
2521 t->stid_tab[stid].data = NULL;
2522 t->stids_in_use--;
2523 spin_unlock_bh(&t->stid_lock);
2524}
2525EXPORT_SYMBOL(cxgb4_free_stid);
2526
2527/*
2528 * Populate a TID_RELEASE WR. Caller must properly size the skb.
2529 */
2530static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2531 unsigned int tid)
2532{
2533 struct cpl_tid_release *req;
2534
2535 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
2536 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
2537 INIT_TP_WR(req, tid);
2538 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
2539}
2540
2541/*
2542 * Queue a TID release request and if necessary schedule a work queue to
2543 * process it.
2544 */
stephen hemminger31b9c192010-10-18 05:39:18 +00002545static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2546 unsigned int tid)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002547{
2548 void **p = &t->tid_tab[tid];
2549 struct adapter *adap = container_of(t, struct adapter, tids);
2550
2551 spin_lock_bh(&adap->tid_release_lock);
2552 *p = adap->tid_release_head;
2553 /* Low 2 bits encode the Tx channel number */
2554 adap->tid_release_head = (void **)((uintptr_t)p | chan);
2555 if (!adap->tid_release_task_busy) {
2556 adap->tid_release_task_busy = true;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05302557 queue_work(workq, &adap->tid_release_task);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002558 }
2559 spin_unlock_bh(&adap->tid_release_lock);
2560}
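
/*
 * Sketch of the pointer tagging used above (illustrative): tid_tab
 * entries are pointer-aligned, so the low 2 bits of their addresses are
 * free to carry the Tx channel number:
 *
 *	void **p = &t->tid_tab[tid];
 *	head = (void **)((uintptr_t)p | chan);	 stash chan in bits 1:0
 *	chan = (uintptr_t)head & 3;		 recover the channel
 *	p = (void *)head - chan;		 recover the real pointer
 *
 * process_tid_release_list() below undoes the encoding exactly this way.
 */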
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002561
2562/*
2563 * Process the list of pending TID release requests.
2564 */
2565static void process_tid_release_list(struct work_struct *work)
2566{
2567 struct sk_buff *skb;
2568 struct adapter *adap;
2569
2570 adap = container_of(work, struct adapter, tid_release_task);
2571
2572 spin_lock_bh(&adap->tid_release_lock);
2573 while (adap->tid_release_head) {
2574 void **p = adap->tid_release_head;
2575 unsigned int chan = (uintptr_t)p & 3;
2576 p = (void *)p - chan;
2577
2578 adap->tid_release_head = *p;
2579 *p = NULL;
2580 spin_unlock_bh(&adap->tid_release_lock);
2581
2582 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2583 GFP_KERNEL)))
2584 schedule_timeout_uninterruptible(1);
2585
2586 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2587 t4_ofld_send(adap, skb);
2588 spin_lock_bh(&adap->tid_release_lock);
2589 }
2590 adap->tid_release_task_busy = false;
2591 spin_unlock_bh(&adap->tid_release_lock);
2592}
2593
2594/*
2595 * Release a TID and inform HW. If we are unable to allocate the release
2596 * message we defer to a work queue.
2597 */
2598void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2599{
2600 void *old;
2601 struct sk_buff *skb;
2602 struct adapter *adap = container_of(t, struct adapter, tids);
2603
2604 old = t->tid_tab[tid];
2605 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2606 if (likely(skb)) {
2607 t->tid_tab[tid] = NULL;
2608 mk_tid_release(skb, chan, tid);
2609 t4_ofld_send(adap, skb);
2610 } else
2611 cxgb4_queue_tid_release(t, chan, tid);
2612 if (old)
2613 atomic_dec(&t->tids_in_use);
2614}
2615EXPORT_SYMBOL(cxgb4_remove_tid);
2616
2617/*
2618 * Allocate and initialize the TID tables. Returns 0 on success.
2619 */
2620static int tid_init(struct tid_info *t)
2621{
2622 size_t size;
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002623 unsigned int stid_bmap_size;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002624 unsigned int natids = t->natids;
2625
Vipul Pandyadca4fae2012-12-10 09:30:53 +00002626 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002627 size = t->ntids * sizeof(*t->tid_tab) +
2628 natids * sizeof(*t->atid_tab) +
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002629 t->nstids * sizeof(*t->stid_tab) +
Vipul Pandyadca4fae2012-12-10 09:30:53 +00002630 t->nsftids * sizeof(*t->stid_tab) +
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002631 stid_bmap_size * sizeof(long) +
Vipul Pandyadca4fae2012-12-10 09:30:53 +00002632 t->nftids * sizeof(*t->ftid_tab) +
2633 t->nsftids * sizeof(*t->ftid_tab);
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002634
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002635 t->tid_tab = t4_alloc_mem(size);
2636 if (!t->tid_tab)
2637 return -ENOMEM;
2638
2639 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2640 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
Vipul Pandyadca4fae2012-12-10 09:30:53 +00002641 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002642 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002643 spin_lock_init(&t->stid_lock);
2644 spin_lock_init(&t->atid_lock);
2645
2646 t->stids_in_use = 0;
2647 t->afree = NULL;
2648 t->atids_in_use = 0;
2649 atomic_set(&t->tids_in_use, 0);
2650
2651	/* Set up the free list for atid_tab and clear the stid bitmap. */
2652 if (natids) {
2653 while (--natids)
2654 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2655 t->afree = t->atid_tab;
2656 }
Vipul Pandyadca4fae2012-12-10 09:30:53 +00002657 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002658 return 0;
2659}
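
/*
 * Sketch of the single allocation carved up by tid_init() (for reference):
 *
 *	tid_tab[ntids]			connection TIDs
 *	atid_tab[natids]		active-open TIDs
 *	stid_tab[nstids + nsftids]	server and server-filter TIDs
 *	stid_bmap[]			bitmap covering stid_tab
 *	ftid_tab[nftids + nsftids]	filter entries
 *
 * The pointer arithmetic above lays the tables out in exactly this order
 * within the one t4_alloc_mem() block.
 */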
2660
2661/**
2662 * cxgb4_create_server - create an IP server
2663 * @dev: the device
2664 * @stid: the server TID
2665 * @sip: local IP address to bind server to
2666 * @sport: the server's TCP port
 * @vlan: the VLAN header information
2667 * @queue: queue to direct messages from this server to
2668 *
2669 * Create an IP server for the given port and address.
2670 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2671 */
2672int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
Vipul Pandya793dad92012-12-10 09:30:56 +00002673 __be32 sip, __be16 sport, __be16 vlan,
2674 unsigned int queue)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002675{
2676 unsigned int chan;
2677 struct sk_buff *skb;
2678 struct adapter *adap;
2679 struct cpl_pass_open_req *req;
2680
2681 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2682 if (!skb)
2683 return -ENOMEM;
2684
2685 adap = netdev2adap(dev);
2686 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2687 INIT_TP_WR(req, 0);
2688 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2689 req->local_port = sport;
2690 req->peer_port = htons(0);
2691 req->local_ip = sip;
2692 req->peer_ip = htonl(0);
Dimitris Michailidise46dab42010-08-23 17:20:58 +00002693 chan = rxq_to_chan(&adap->sge, queue);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002694 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2695 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2696 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2697 return t4_mgmt_tx(adap, skb);
2698}
2699EXPORT_SYMBOL(cxgb4_create_server);
2700
2701/**
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002702 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2703 * @mtus: the HW MTU table
2704 * @mtu: the target MTU
2705 * @idx: index of selected entry in the MTU table
2706 *
2707 * Returns the index and the value in the HW MTU table that is closest to
2708 * but does not exceed @mtu, unless @mtu is smaller than any value in the
2709 * table, in which case that smallest available value is selected.
2710 */
2711unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2712 unsigned int *idx)
2713{
2714 unsigned int i = 0;
2715
2716 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2717 ++i;
2718 if (idx)
2719 *idx = i;
2720 return mtus[i];
2721}
2722EXPORT_SYMBOL(cxgb4_best_mtu);
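
/*
 * Usage sketch (illustrative values only):
 *
 *	unsigned int idx;
 *	unsigned int mtu = cxgb4_best_mtu(adap->params.mtus, 1600, &idx);
 *
 * With a table such as {88, 256, ..., 1488, 1500, 2002, ...} this returns
 * 1500; a target below the smallest entry (say 64) would return mtus[0],
 * the "smallest available value" case described above.
 */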
2723
2724/**
2725 * cxgb4_port_chan - get the HW channel of a port
2726 * @dev: the net device for the port
2727 *
2728 * Return the HW Tx channel of the given port.
2729 */
2730unsigned int cxgb4_port_chan(const struct net_device *dev)
2731{
2732 return netdev2pinfo(dev)->tx_chan;
2733}
2734EXPORT_SYMBOL(cxgb4_port_chan);
2735
Vipul Pandya881806b2012-05-18 15:29:24 +05302736unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2737{
2738 struct adapter *adap = netdev2adap(dev);
2739 u32 v;
2740
2741 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2742 return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
2743}
2744EXPORT_SYMBOL(cxgb4_dbfifo_count);
2745
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002746/**
2747 * cxgb4_port_viid - get the VI id of a port
2748 * @dev: the net device for the port
2749 *
2750 * Return the VI id of the given port.
2751 */
2752unsigned int cxgb4_port_viid(const struct net_device *dev)
2753{
2754 return netdev2pinfo(dev)->viid;
2755}
2756EXPORT_SYMBOL(cxgb4_port_viid);
2757
2758/**
2759 * cxgb4_port_idx - get the index of a port
2760 * @dev: the net device for the port
2761 *
2762 * Return the index of the given port.
2763 */
2764unsigned int cxgb4_port_idx(const struct net_device *dev)
2765{
2766 return netdev2pinfo(dev)->port_id;
2767}
2768EXPORT_SYMBOL(cxgb4_port_idx);
2769
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002770void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2771 struct tp_tcp_stats *v6)
2772{
2773 struct adapter *adap = pci_get_drvdata(pdev);
2774
2775 spin_lock(&adap->stats_lock);
2776 t4_tp_get_tcp_stats(adap, v4, v6);
2777 spin_unlock(&adap->stats_lock);
2778}
2779EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2780
2781void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2782 const unsigned int *pgsz_order)
2783{
2784 struct adapter *adap = netdev2adap(dev);
2785
2786 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2787 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2788 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2789 HPZ3(pgsz_order[3]));
2790}
2791EXPORT_SYMBOL(cxgb4_iscsi_init);
2792
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05302793int cxgb4_flush_eq_cache(struct net_device *dev)
2794{
2795 struct adapter *adap = netdev2adap(dev);
2796 int ret;
2797
2798 ret = t4_fwaddrspace_write(adap, adap->mbox,
2799 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
2800 return ret;
2801}
2802EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2803
2804static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2805{
2806 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
2807 __be64 indices;
2808 int ret;
2809
2810 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
2811 if (!ret) {
Vipul Pandya404d9e32012-10-08 02:59:43 +00002812 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2813 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05302814 }
2815 return ret;
2816}
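
/*
 * Note on the decode above: each egress-queue context appears to occupy
 * 24 bytes starting at A_SGE_DBQ_CTXT_BADDR, and the 64-bit word at
 * offset 8 carries the consumer index at bits 40:25 and the producer
 * index at bits 24:9, hence the ">> 25" / ">> 9" shifts and the 16-bit
 * mask.
 */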
2817
2818int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2819 u16 size)
2820{
2821 struct adapter *adap = netdev2adap(dev);
2822 u16 hw_pidx, hw_cidx;
2823 int ret;
2824
2825 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2826 if (ret)
2827 goto out;
2828
2829 if (pidx != hw_pidx) {
2830 u16 delta;
2831
2832 if (pidx >= hw_pidx)
2833 delta = pidx - hw_pidx;
2834 else
2835 delta = size - hw_pidx + pidx;
2836 wmb();
Vipul Pandya840f3002012-09-05 02:01:55 +00002837 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2838 QID(qid) | PIDX(delta));
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05302839 }
2840out:
2841 return ret;
2842}
2843EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
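
/*
 * Worked example of the resync above (illustrative): for a queue of
 * size = 1024 with hw_pidx = 1000 and a software pidx = 10 that has
 * wrapped past the end of the ring, delta = 1024 - 1000 + 10 = 34, so
 * the replayed doorbell advances the hardware producer index by exactly
 * the 34 descriptors it missed.
 */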
2844
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002845static struct pci_driver cxgb4_driver;
2846
2847static void check_neigh_update(struct neighbour *neigh)
2848{
2849 const struct device *parent;
2850 const struct net_device *netdev = neigh->dev;
2851
2852 if (netdev->priv_flags & IFF_802_1Q_VLAN)
2853 netdev = vlan_dev_real_dev(netdev);
2854 parent = netdev->dev.parent;
2855 if (parent && parent->driver == &cxgb4_driver.driver)
2856 t4_l2t_update(dev_get_drvdata(parent), neigh);
2857}
2858
2859static int netevent_cb(struct notifier_block *nb, unsigned long event,
2860 void *data)
2861{
2862 switch (event) {
2863 case NETEVENT_NEIGH_UPDATE:
2864 check_neigh_update(data);
2865 break;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00002866 case NETEVENT_REDIRECT:
2867 default:
2868 break;
2869 }
2870 return 0;
2871}
2872
2873static bool netevent_registered;
2874static struct notifier_block cxgb4_netevent_nb = {
2875 .notifier_call = netevent_cb
2876};
2877
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05302878static void drain_db_fifo(struct adapter *adap, int usecs)
2879{
2880 u32 v;
2881
2882 do {
2883 set_current_state(TASK_UNINTERRUPTIBLE);
2884 schedule_timeout(usecs_to_jiffies(usecs));
2885 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2886 if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
2887 break;
2888 } while (1);
2889}
2890
2891static void disable_txq_db(struct sge_txq *q)
2892{
2893 spin_lock_irq(&q->db_lock);
2894 q->db_disabled = 1;
2895 spin_unlock_irq(&q->db_lock);
2896}
2897
2898static void enable_txq_db(struct sge_txq *q)
2899{
2900 spin_lock_irq(&q->db_lock);
2901 q->db_disabled = 0;
2902 spin_unlock_irq(&q->db_lock);
2903}
2904
2905static void disable_dbs(struct adapter *adap)
2906{
2907 int i;
2908
2909 for_each_ethrxq(&adap->sge, i)
2910 disable_txq_db(&adap->sge.ethtxq[i].q);
2911 for_each_ofldrxq(&adap->sge, i)
2912 disable_txq_db(&adap->sge.ofldtxq[i].q);
2913 for_each_port(adap, i)
2914 disable_txq_db(&adap->sge.ctrlq[i].q);
2915}
2916
2917static void enable_dbs(struct adapter *adap)
2918{
2919 int i;
2920
2921 for_each_ethrxq(&adap->sge, i)
2922 enable_txq_db(&adap->sge.ethtxq[i].q);
2923 for_each_ofldrxq(&adap->sge, i)
2924 enable_txq_db(&adap->sge.ofldtxq[i].q);
2925 for_each_port(adap, i)
2926 enable_txq_db(&adap->sge.ctrlq[i].q);
2927}
2928
2929static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2930{
2931 u16 hw_pidx, hw_cidx;
2932 int ret;
2933
2934 spin_lock_bh(&q->db_lock);
2935 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2936 if (ret)
2937 goto out;
2938 if (q->db_pidx != hw_pidx) {
2939 u16 delta;
2940
2941 if (q->db_pidx >= hw_pidx)
2942 delta = q->db_pidx - hw_pidx;
2943 else
2944 delta = q->size - hw_pidx + q->db_pidx;
2945 wmb();
Vipul Pandya840f3002012-09-05 02:01:55 +00002946 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2947 QID(q->cntxt_id) | PIDX(delta));
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05302948 }
2949out:
2950 q->db_disabled = 0;
2951 spin_unlock_bh(&q->db_lock);
2952 if (ret)
2953 CH_WARN(adap, "DB drop recovery failed.\n");
2954}

2955static void recover_all_queues(struct adapter *adap)
2956{
2957 int i;
2958
2959 for_each_ethrxq(&adap->sge, i)
2960 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2961 for_each_ofldrxq(&adap->sge, i)
2962 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
2963 for_each_port(adap, i)
2964 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2965}
2966
Vipul Pandya881806b2012-05-18 15:29:24 +05302967static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2968{
2969 mutex_lock(&uld_mutex);
2970 if (adap->uld_handle[CXGB4_ULD_RDMA])
2971 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
2972 cmd);
2973 mutex_unlock(&uld_mutex);
2974}
2975
2976static void process_db_full(struct work_struct *work)
2977{
2978 struct adapter *adap;
Vipul Pandya881806b2012-05-18 15:29:24 +05302979
2980 adap = container_of(work, struct adapter, db_full_task);
2981
Vipul Pandya881806b2012-05-18 15:29:24 +05302982 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05302983 drain_db_fifo(adap, dbfifo_drain_delay);
Vipul Pandya840f3002012-09-05 02:01:55 +00002984 t4_set_reg_field(adap, SGE_INT_ENABLE3,
2985 DBFIFO_HP_INT | DBFIFO_LP_INT,
2986 DBFIFO_HP_INT | DBFIFO_LP_INT);
Vipul Pandya881806b2012-05-18 15:29:24 +05302987 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
Vipul Pandya881806b2012-05-18 15:29:24 +05302988}
2989
2990static void process_db_drop(struct work_struct *work)
2991{
2992 struct adapter *adap;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05302993
Vipul Pandya881806b2012-05-18 15:29:24 +05302994 adap = container_of(work, struct adapter, db_drop_task);
2995
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05302996 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
2997 disable_dbs(adap);
Vipul Pandya881806b2012-05-18 15:29:24 +05302998 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05302999 drain_db_fifo(adap, 1);
3000 recover_all_queues(adap);
3001 enable_dbs(adap);
Vipul Pandya881806b2012-05-18 15:29:24 +05303002}
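
/*
 * Summary of the recovery above: on a dropped doorbell the driver clears
 * the DROPPED_DB status, quiesces all Tx doorbells, tells the RDMA ULD,
 * drains the doorbell FIFO, replays each queue's producer index through
 * sync_txq_pidx(), and only then re-enables doorbells, so any doorbells
 * dropped while the FIFO was full are reissued.
 */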
3003
3004void t4_db_full(struct adapter *adap)
3005{
Vipul Pandya840f3002012-09-05 02:01:55 +00003006 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3007 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303008 queue_work(workq, &adap->db_full_task);
Vipul Pandya881806b2012-05-18 15:29:24 +05303009}
3010
3011void t4_db_dropped(struct adapter *adap)
3012{
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303013 queue_work(workq, &adap->db_drop_task);
Vipul Pandya881806b2012-05-18 15:29:24 +05303014}
3015
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003016static void uld_attach(struct adapter *adap, unsigned int uld)
3017{
3018 void *handle;
3019 struct cxgb4_lld_info lli;
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003020 unsigned short i;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003021
3022 lli.pdev = adap->pdev;
3023 lli.l2t = adap->l2t;
3024 lli.tids = &adap->tids;
3025 lli.ports = adap->port;
3026 lli.vr = &adap->vres;
3027 lli.mtus = adap->params.mtus;
3028 if (uld == CXGB4_ULD_RDMA) {
3029 lli.rxq_ids = adap->sge.rdma_rxq;
3030 lli.nrxq = adap->sge.rdmaqs;
3031 } else if (uld == CXGB4_ULD_ISCSI) {
3032 lli.rxq_ids = adap->sge.ofld_rxq;
3033 lli.nrxq = adap->sge.ofldqsets;
3034 }
3035 lli.ntxq = adap->sge.ofldqsets;
3036 lli.nchan = adap->params.nports;
3037 lli.nports = adap->params.nports;
3038 lli.wr_cred = adap->params.ofldq_wr_cred;
3039 lli.adapter_type = adap->params.rev;
3040 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3041 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003042 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3043 (adap->fn * 4));
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003044 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003045 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3046 (adap->fn * 4));
Vipul Pandya793dad92012-12-10 09:30:56 +00003047 lli.filt_mode = adap->filter_mode;
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003048 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3049 for (i = 0; i < NCHAN; i++)
3050 lli.tx_modq[i] = i;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003051 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3052 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3053 lli.fw_vers = adap->params.fw_vers;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05303054 lli.dbfifo_int_thresh = dbfifo_int_thresh;
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003055 lli.sge_pktshift = adap->sge.pktshift;
3056 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003057
3058 handle = ulds[uld].add(&lli);
3059 if (IS_ERR(handle)) {
3060 dev_warn(adap->pdev_dev,
3061 "could not attach to the %s driver, error %ld\n",
3062 uld_str[uld], PTR_ERR(handle));
3063 return;
3064 }
3065
3066 adap->uld_handle[uld] = handle;
3067
3068 if (!netevent_registered) {
3069 register_netevent_notifier(&cxgb4_netevent_nb);
3070 netevent_registered = true;
3071 }
Dimitris Michailidise29f5db2010-05-18 10:07:13 +00003072
3073 if (adap->flags & FULL_INIT_DONE)
3074 ulds[uld].state_change(handle, CXGB4_STATE_UP);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003075}
3076
3077static void attach_ulds(struct adapter *adap)
3078{
3079 unsigned int i;
3080
3081 mutex_lock(&uld_mutex);
3082 list_add_tail(&adap->list_node, &adapter_list);
3083 for (i = 0; i < CXGB4_ULD_MAX; i++)
3084 if (ulds[i].add)
3085 uld_attach(adap, i);
3086 mutex_unlock(&uld_mutex);
3087}
3088
3089static void detach_ulds(struct adapter *adap)
3090{
3091 unsigned int i;
3092
3093 mutex_lock(&uld_mutex);
3094 list_del(&adap->list_node);
3095 for (i = 0; i < CXGB4_ULD_MAX; i++)
3096 if (adap->uld_handle[i]) {
3097 ulds[i].state_change(adap->uld_handle[i],
3098 CXGB4_STATE_DETACH);
3099 adap->uld_handle[i] = NULL;
3100 }
3101 if (netevent_registered && list_empty(&adapter_list)) {
3102 unregister_netevent_notifier(&cxgb4_netevent_nb);
3103 netevent_registered = false;
3104 }
3105 mutex_unlock(&uld_mutex);
3106}
3107
3108static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3109{
3110 unsigned int i;
3111
3112 mutex_lock(&uld_mutex);
3113 for (i = 0; i < CXGB4_ULD_MAX; i++)
3114 if (adap->uld_handle[i])
3115 ulds[i].state_change(adap->uld_handle[i], new_state);
3116 mutex_unlock(&uld_mutex);
3117}
3118
3119/**
3120 * cxgb4_register_uld - register an upper-layer driver
3121 * @type: the ULD type
3122 * @p: the ULD methods
3123 *
3124 * Registers an upper-layer driver with this driver and notifies the ULD
3125 * about any presently available devices that support its type. Returns
3126 * %-EBUSY if a ULD of the same type is already registered.
3127 */
3128int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3129{
3130 int ret = 0;
3131 struct adapter *adap;
3132
3133 if (type >= CXGB4_ULD_MAX)
3134 return -EINVAL;
3135 mutex_lock(&uld_mutex);
3136 if (ulds[type].add) {
3137 ret = -EBUSY;
3138 goto out;
3139 }
3140 ulds[type] = *p;
3141 list_for_each_entry(adap, &adapter_list, list_node)
3142 uld_attach(adap, type);
3143out: mutex_unlock(&uld_mutex);
3144 return ret;
3145}
3146EXPORT_SYMBOL(cxgb4_register_uld);
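
/*
 * Minimal registration sketch (illustrative; my_add and my_state_change
 * are hypothetical ULD callbacks, and a real ULD also fills in methods
 * such as .rx_handler):
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.name         = "my_uld",
 *		.add          = my_add,
 *		.state_change = my_state_change,
 *	};
 *
 *	ret = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 */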
3147
3148/**
3149 * cxgb4_unregister_uld - unregister an upper-layer driver
3150 * @type: the ULD type
3151 *
3152 * Unregisters an existing upper-layer driver.
3153 */
3154int cxgb4_unregister_uld(enum cxgb4_uld type)
3155{
3156 struct adapter *adap;
3157
3158 if (type >= CXGB4_ULD_MAX)
3159 return -EINVAL;
3160 mutex_lock(&uld_mutex);
3161 list_for_each_entry(adap, &adapter_list, list_node)
3162 adap->uld_handle[type] = NULL;
3163 ulds[type].add = NULL;
3164 mutex_unlock(&uld_mutex);
3165 return 0;
3166}
3167EXPORT_SYMBOL(cxgb4_unregister_uld);
3168
3169/**
3170 * cxgb_up - enable the adapter
3171 * @adap: adapter being enabled
3172 *
3173 * Called when the first port is enabled, this function performs the
3174 * actions necessary to make an adapter operational, such as completing
3175 * the initialization of HW modules and enabling interrupts.
3176 *
3177 * Must be called with the rtnl lock held.
3178 */
3179static int cxgb_up(struct adapter *adap)
3180{
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00003181 int err;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003182
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00003183 err = setup_sge_queues(adap);
3184 if (err)
3185 goto out;
3186 err = setup_rss(adap);
3187 if (err)
3188 goto freeq;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003189
3190 if (adap->flags & USING_MSIX) {
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00003191 name_msix_vecs(adap);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003192 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
3193 adap->msix_info[0].desc, adap);
3194 if (err)
3195 goto irq_err;
3196
3197 err = request_msix_queue_irqs(adap);
3198 if (err) {
3199 free_irq(adap->msix_info[0].vec, adap);
3200 goto irq_err;
3201 }
3202 } else {
3203 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
3204 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
Dimitris Michailidisb1a3c2b2010-12-14 21:36:51 +00003205 adap->port[0]->name, adap);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003206 if (err)
3207 goto irq_err;
3208 }
3209 enable_rx(adap);
3210 t4_sge_start(adap);
3211 t4_intr_enable(adap);
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00003212 adap->flags |= FULL_INIT_DONE;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003213 notify_ulds(adap, CXGB4_STATE_UP);
3214 out:
3215 return err;
3216 irq_err:
3217 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00003218 freeq:
3219 t4_free_sge_resources(adap);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003220 goto out;
3221}
3222
3223static void cxgb_down(struct adapter *adapter)
3224{
3225 t4_intr_disable(adapter);
3226 cancel_work_sync(&adapter->tid_release_task);
Vipul Pandya881806b2012-05-18 15:29:24 +05303227 cancel_work_sync(&adapter->db_full_task);
3228 cancel_work_sync(&adapter->db_drop_task);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003229 adapter->tid_release_task_busy = false;
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00003230 adapter->tid_release_head = NULL;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003231
3232 if (adapter->flags & USING_MSIX) {
3233 free_msix_queue_irqs(adapter);
3234 free_irq(adapter->msix_info[0].vec, adapter);
3235 } else
3236 free_irq(adapter->pdev->irq, adapter);
3237 quiesce_rx(adapter);
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00003238 t4_sge_stop(adapter);
3239 t4_free_sge_resources(adapter);
3240 adapter->flags &= ~FULL_INIT_DONE;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003241}
3242
3243/*
3244 * net_device operations
3245 */
3246static int cxgb_open(struct net_device *dev)
3247{
3248 int err;
3249 struct port_info *pi = netdev_priv(dev);
3250 struct adapter *adapter = pi->adapter;
3251
Dimitris Michailidis6a3c8692011-01-19 15:29:05 +00003252 netif_carrier_off(dev);
3253
Dimitris Michailidisaaefae92010-05-18 10:07:12 +00003254 if (!(adapter->flags & FULL_INIT_DONE)) {
3255 err = cxgb_up(adapter);
3256 if (err < 0)
3257 return err;
3258 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003259
Dimitris Michailidisf68707b2010-06-18 10:05:32 +00003260 err = link_start(dev);
3261 if (!err)
3262 netif_tx_start_all_queues(dev);
3263 return err;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003264}
3265
3266static int cxgb_close(struct net_device *dev)
3267{
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003268 struct port_info *pi = netdev_priv(dev);
3269 struct adapter *adapter = pi->adapter;
3270
3271 netif_tx_stop_all_queues(dev);
3272 netif_carrier_off(dev);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003273 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003274}
3275
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003276/* Return an error number if the indicated filter isn't writable ...
3277 */
3278static int writable_filter(struct filter_entry *f)
3279{
3280 if (f->locked)
3281 return -EPERM;
3282 if (f->pending)
3283 return -EBUSY;
3284
3285 return 0;
3286}
3287
3288/* Delete the filter at the specified index (if valid). This checks for all
3289 * the common problems with doing this, like the filter being locked or
3290 * currently pending in another operation, etc.
3291 */
3292static int delete_filter(struct adapter *adapter, unsigned int fidx)
3293{
3294 struct filter_entry *f;
3295 int ret;
3296
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003297 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00003298 return -EINVAL;
3299
3300 f = &adapter->tids.ftid_tab[fidx];
3301 ret = writable_filter(f);
3302 if (ret)
3303 return ret;
3304 if (f->valid)
3305 return del_filter_wr(adapter, fidx);
3306
3307 return 0;
3308}
3309
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003310int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
Vipul Pandya793dad92012-12-10 09:30:56 +00003311 __be32 sip, __be16 sport, __be16 vlan,
3312 unsigned int queue, unsigned char port, unsigned char mask)
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003313{
3314 int ret;
3315 struct filter_entry *f;
3316 struct adapter *adap;
3317 int i;
3318 u8 *val;
3319
3320 adap = netdev2adap(dev);
3321
Vipul Pandya1cab7752012-12-10 09:30:55 +00003322 /* Adjust stid to correct filter index */
3323 stid -= adap->tids.nstids;
3324 stid += adap->tids.nftids;
3325
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003326 /* Check to make sure the filter requested is writable ...
3327 */
3328 f = &adap->tids.ftid_tab[stid];
3329 ret = writable_filter(f);
3330 if (ret)
3331 return ret;
3332
3333 /* Clear out any old resources being used by the filter before
3334 * we start constructing the new filter.
3335 */
3336 if (f->valid)
3337 clear_filter(adap, f);
3338
3339 /* Clear out filter specifications */
3340 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
3341 f->fs.val.lport = cpu_to_be16(sport);
3342 f->fs.mask.lport = ~0;
3343 val = (u8 *)&sip;
Vipul Pandya793dad92012-12-10 09:30:56 +00003344 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003345 for (i = 0; i < 4; i++) {
3346 f->fs.val.lip[i] = val[i];
3347 f->fs.mask.lip[i] = ~0;
3348 }
Vipul Pandya793dad92012-12-10 09:30:56 +00003349 if (adap->filter_mode & F_PORT) {
3350 f->fs.val.iport = port;
3351 f->fs.mask.iport = mask;
3352 }
3353 }
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003354
3355 f->fs.dirsteer = 1;
3356 f->fs.iq = queue;
3357 /* Mark filter as locked */
3358 f->locked = 1;
3359 f->fs.rpttid = 1;
3360
3361 ret = set_filter_wr(adap, stid);
3362 if (ret) {
3363 clear_filter(adap, f);
3364 return ret;
3365 }
3366
3367 return 0;
3368}
3369EXPORT_SYMBOL(cxgb4_create_server_filter);
3370
3371int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
3372 unsigned int queue, bool ipv6)
3373{
3374 int ret;
3375 struct filter_entry *f;
3376 struct adapter *adap;
3377
3378 adap = netdev2adap(dev);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003379
3380 /* Adjust stid to correct filter index */
3381 stid -= adap->tids.nstids;
3382 stid += adap->tids.nftids;
3383
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003384 f = &adap->tids.ftid_tab[stid];
3385 /* Unlock the filter */
3386 f->locked = 0;
3387
3388 ret = delete_filter(adap, stid);
3389 if (ret)
3390 return ret;
3391
3392 return 0;
3393}
3394EXPORT_SYMBOL(cxgb4_remove_server_filter);
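/*
 * Usage sketch (hypothetical caller, not part of this driver): install a
 * locked server filter that steers SYN packets for a listening server,
 * identified by its server TID, to a chosen ingress queue, then remove it
 * again. The stid/sip/sport/rxq values are supplied by the caller.
 */
static int example_steer_listener(struct net_device *dev, unsigned int stid,
				  __be32 sip, __be16 sport, unsigned int rxq)
{
	int ret;

	/* vlan, port and mask of 0 leave those match fields unused. */
	ret = cxgb4_create_server_filter(dev, stid, sip, sport, 0, rxq, 0, 0);
	if (ret)
		return ret;

	/* ... SYNs for (sip, sport) now arrive on rxq ... */

	return cxgb4_remove_server_filter(dev, stid, rxq, false);
}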
3395
Dimitris Michailidisf5152c92010-07-07 16:11:25 +00003396static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
3397 struct rtnl_link_stats64 *ns)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003398{
3399 struct port_stats stats;
3400 struct port_info *p = netdev_priv(dev);
3401 struct adapter *adapter = p->adapter;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003402
3403 spin_lock(&adapter->stats_lock);
3404 t4_get_port_stats(adapter, p->tx_chan, &stats);
3405 spin_unlock(&adapter->stats_lock);
3406
3407 ns->tx_bytes = stats.tx_octets;
3408 ns->tx_packets = stats.tx_frames;
3409 ns->rx_bytes = stats.rx_octets;
3410 ns->rx_packets = stats.rx_frames;
3411 ns->multicast = stats.rx_mcast_frames;
3412
3413 /* detailed rx_errors */
3414 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
3415 stats.rx_runt;
3416 ns->rx_over_errors = 0;
3417 ns->rx_crc_errors = stats.rx_fcs_err;
3418 ns->rx_frame_errors = stats.rx_symbol_err;
3419 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
3420 stats.rx_ovflow2 + stats.rx_ovflow3 +
3421 stats.rx_trunc0 + stats.rx_trunc1 +
3422 stats.rx_trunc2 + stats.rx_trunc3;
3423 ns->rx_missed_errors = 0;
3424
3425 /* detailed tx_errors */
3426 ns->tx_aborted_errors = 0;
3427 ns->tx_carrier_errors = 0;
3428 ns->tx_fifo_errors = 0;
3429 ns->tx_heartbeat_errors = 0;
3430 ns->tx_window_errors = 0;
3431
3432 ns->tx_errors = stats.tx_error_frames;
3433 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3434 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
3435 return ns;
3436}
3437
3438static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
3439{
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003440 unsigned int mbox;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003441 int ret = 0, prtad, devad;
3442 struct port_info *pi = netdev_priv(dev);
3443 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
3444
3445 switch (cmd) {
3446 case SIOCGMIIPHY:
3447 if (pi->mdio_addr < 0)
3448 return -EOPNOTSUPP;
3449 data->phy_id = pi->mdio_addr;
3450 break;
3451 case SIOCGMIIREG:
3452 case SIOCSMIIREG:
3453 if (mdio_phy_id_is_c45(data->phy_id)) {
3454 prtad = mdio_phy_id_prtad(data->phy_id);
3455 devad = mdio_phy_id_devad(data->phy_id);
3456 } else if (data->phy_id < 32) {
3457 prtad = data->phy_id;
3458 devad = 0;
3459 data->reg_num &= 0x1f;
3460 } else
3461 return -EINVAL;
3462
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003463 mbox = pi->adapter->fn;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003464 if (cmd == SIOCGMIIREG)
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003465 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003466 data->reg_num, &data->val_out);
3467 else
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003468 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003469 data->reg_num, data->val_in);
3470 break;
3471 default:
3472 return -EOPNOTSUPP;
3473 }
3474 return ret;
3475}
3476
3477static void cxgb_set_rxmode(struct net_device *dev)
3478{
3479 /* unfortunately we can't return errors to the stack */
3480 set_rxmode(dev, -1, false);
3481}
3482
3483static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
3484{
3485 int ret;
3486 struct port_info *pi = netdev_priv(dev);
3487
3488 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
3489 return -EINVAL;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003490 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
3491 -1, -1, -1, true);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003492 if (!ret)
3493 dev->mtu = new_mtu;
3494 return ret;
3495}
3496
3497static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3498{
3499 int ret;
3500 struct sockaddr *addr = p;
3501 struct port_info *pi = netdev_priv(dev);
3502
3503 if (!is_valid_ether_addr(addr->sa_data))
Danny Kukawka504f9b52012-02-21 02:07:49 +00003504 return -EADDRNOTAVAIL;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003505
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003506 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
3507 pi->xact_addr_filt, addr->sa_data, true, true);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003508 if (ret < 0)
3509 return ret;
3510
3511 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3512 pi->xact_addr_filt = ret;
3513 return 0;
3514}
3515
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003516#ifdef CONFIG_NET_POLL_CONTROLLER
3517static void cxgb_netpoll(struct net_device *dev)
3518{
3519 struct port_info *pi = netdev_priv(dev);
3520 struct adapter *adap = pi->adapter;
3521
3522 if (adap->flags & USING_MSIX) {
3523 int i;
3524 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3525
3526 for (i = pi->nqsets; i; i--, rx++)
3527 t4_sge_intr_msix(0, &rx->rspq);
3528 } else
3529 t4_intr_handler(adap)(0, adap);
3530}
3531#endif
3532
3533static const struct net_device_ops cxgb4_netdev_ops = {
3534 .ndo_open = cxgb_open,
3535 .ndo_stop = cxgb_close,
3536 .ndo_start_xmit = t4_eth_xmit,
Dimitris Michailidis9be793b2010-06-18 10:05:31 +00003537 .ndo_get_stats64 = cxgb_get_stats,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003538 .ndo_set_rx_mode = cxgb_set_rxmode,
3539 .ndo_set_mac_address = cxgb_set_mac_addr,
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00003540 .ndo_set_features = cxgb_set_features,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003541 .ndo_validate_addr = eth_validate_addr,
3542 .ndo_do_ioctl = cxgb_ioctl,
3543 .ndo_change_mtu = cxgb_change_mtu,
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003544#ifdef CONFIG_NET_POLL_CONTROLLER
3545 .ndo_poll_controller = cxgb_netpoll,
3546#endif
3547};
3548
3549void t4_fatal_err(struct adapter *adap)
3550{
3551 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
3552 t4_intr_disable(adap);
3553 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3554}
3555
3556static void setup_memwin(struct adapter *adap)
3557{
3558 u32 bar0;
3559
3560 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
3561 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
3562 (bar0 + MEMWIN0_BASE) | BIR(0) |
3563 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
3564 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
3565 (bar0 + MEMWIN1_BASE) | BIR(0) |
3566 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
3567 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
3568 (bar0 + MEMWIN2_BASE) | BIR(0) |
3569 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
Vipul Pandya636f9d32012-09-26 02:39:39 +00003570}
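/*
 * Worked example (illustrative): WINDOW() encodes an aperture as the log2
 * of its size in KB, hence the "ilog2(aperture) - 10" above. For a 64KB
 * aperture that is ilog2(65536) - 10 = 16 - 10 = 6, i.e. 2^6 KB; a 2KB
 * aperture encodes as ilog2(2048) - 10 = 1.
 */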
3571
3572static void setup_memwin_rdma(struct adapter *adap)
3573{
Dimitris Michailidis1ae970e2010-08-02 13:19:19 +00003574 if (adap->vres.ocq.size) {
3575 unsigned int start, sz_kb;
3576
3577 start = pci_resource_start(adap->pdev, 2) +
3578 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3579 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3580 t4_write_reg(adap,
3581 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
3582 start | BIR(1) | WINDOW(ilog2(sz_kb)));
3583 t4_write_reg(adap,
3584 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
3585 adap->vres.ocq.start);
3586 t4_read_reg(adap,
3587 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
3588 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003589}
3590
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00003591static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3592{
3593 u32 v;
3594 int ret;
3595
3596 /* get device capabilities */
3597 memset(c, 0, sizeof(*c));
3598 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3599 FW_CMD_REQUEST | FW_CMD_READ);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05303600 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003601 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00003602 if (ret < 0)
3603 return ret;
3604
3605 /* select capabilities we'll be using */
3606 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
3607 if (!vf_acls)
3608 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
3609 else
3610 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
3611 } else if (vf_acls) {
3612 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
3613 return -EINVAL;
3614 }
3615 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3616 FW_CMD_REQUEST | FW_CMD_WRITE);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003617 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00003618 if (ret < 0)
3619 return ret;
3620
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003621 ret = t4_config_glbl_rss(adap, adap->fn,
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00003622 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3623 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
3624 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
3625 if (ret < 0)
3626 return ret;
3627
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003628 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
3629 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00003630 if (ret < 0)
3631 return ret;
3632
3633 t4_sge_init(adap);
3634
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00003635 /* tweak some settings */
3636 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
3637 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
3638 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
3639 v = t4_read_reg(adap, TP_PIO_DATA);
3640 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003641
Vipul Pandyadca4fae2012-12-10 09:30:53 +00003642 /* first 4 Tx modulation queues point to consecutive Tx channels */
3643 adap->params.tp.tx_modq_map = 0xE4;
3644 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3645 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
3646
3647 /* associate each Tx modulation queue with consecutive Tx channels */
3648 v = 0x84218421;
3649 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3650 &v, 1, A_TP_TX_SCHED_HDR);
3651 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3652 &v, 1, A_TP_TX_SCHED_FIFO);
3653 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3654 &v, 1, A_TP_TX_SCHED_PCMD);
3655
3656#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
3657 if (is_offload(adap)) {
3658 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
3659 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3660 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3661 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3662 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3663 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
3664 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3665 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3666 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3667 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3668 }
3669
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00003670 /* get basic stuff going */
3671 return t4_early_init(adap, adap->fn);
Dimitris Michailidis02b5fb82010-06-18 10:05:28 +00003672}
3673
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003674/*
3675 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
3676 */
3677#define MAX_ATIDS 8192U
3678
3679/*
3680 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
Vipul Pandya636f9d32012-09-26 02:39:39 +00003681 *
3682 * If the firmware we're dealing with has Configuration File support, then
3683 * we use that to perform all configuration.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003684 */
Vipul Pandya636f9d32012-09-26 02:39:39 +00003685
3686/*
3687 * Tweak configuration based on module parameters, etc. Most of these have
3688 * defaults assigned to them by Firmware Configuration Files (if we're using
3689 * them) but need to be explicitly set if we're using hard-coded
3690 * initialization. But even in the case of using Firmware Configuration
3691 * Files, we'd like to expose the ability to change these via module
3692 * parameters so these are essentially common tweaks/settings for
3693 * Configuration Files and hard-coded initialization ...
3694 */
3695static int adap_init0_tweaks(struct adapter *adapter)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003696{
Vipul Pandya636f9d32012-09-26 02:39:39 +00003697 /*
3698 * Fix up various Host-Dependent Parameters like Page Size, Cache
3699 * Line Size, etc. The firmware default is for a 4KB Page Size and
3700 * 64B Cache Line Size ...
3701 */
3702 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003703
Vipul Pandya636f9d32012-09-26 02:39:39 +00003704 /*
3705 * Process module parameters which affect early initialization.
3706 */
3707 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3708 dev_err(&adapter->pdev->dev,
3709 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3710 rx_dma_offset);
3711 rx_dma_offset = 2;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003712 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00003713 t4_set_reg_field(adapter, SGE_CONTROL,
3714 PKTSHIFT_MASK,
3715 PKTSHIFT(rx_dma_offset));
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003716
Vipul Pandya636f9d32012-09-26 02:39:39 +00003717 /*
3718 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3719 * adds the pseudo header itself.
3720 */
3721 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
3722 CSUM_HAS_PSEUDO_HDR, 0);
3723
3724 return 0;
3725}
3726
3727/*
3728 * Attempt to initialize the adapter via a Firmware Configuration File.
3729 */
3730static int adap_init0_config(struct adapter *adapter, int reset)
3731{
3732 struct fw_caps_config_cmd caps_cmd;
3733 const struct firmware *cf;
3734 unsigned long mtype = 0, maddr = 0;
3735 u32 finiver, finicsum, cfcsum;
3736 int ret, using_flash;
3737
3738 /*
3739 * Reset device if necessary.
3740 */
3741 if (reset) {
3742 ret = t4_fw_reset(adapter, adapter->mbox,
3743 PIORSTMODE | PIORST);
3744 if (ret < 0)
3745 goto bye;
3746 }
3747
3748 /*
3749 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3750 * then use that. Otherwise, use the configuration file stored
3751 * in the adapter flash ...
3752 */
3753 ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003754 if (ret < 0) {
Vipul Pandya636f9d32012-09-26 02:39:39 +00003755 using_flash = 1;
3756 mtype = FW_MEMTYPE_CF_FLASH;
3757 maddr = t4_flash_cfg_addr(adapter);
3758 } else {
3759 u32 params[7], val[7];
3760
3761 using_flash = 0;
3762 if (cf->size >= FLASH_CFG_MAX_SIZE)
3763 ret = -ENOMEM;
3764 else {
3765 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3766 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
3767 ret = t4_query_params(adapter, adapter->mbox,
3768 adapter->fn, 0, 1, params, val);
3769 if (ret == 0) {
3770 /*
3771 * For t4_memory_write() below addresses and
3772 * sizes have to be in terms of multiples of 4
3773 * bytes. So, if the Configuration File isn't
3774 * a multiple of 4 bytes in length we'll have
3775 * to write that out separately since we can't
3776 * guarantee that the bytes following the
3777 * residual byte in the buffer returned by
3778 * request_firmware() are zeroed out ...
3779 */
3780 size_t resid = cf->size & 0x3;
3781 size_t size = cf->size & ~0x3;
3782 __be32 *data = (__be32 *)cf->data;
3783
3784 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
3785 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
3786
3787 ret = t4_memory_write(adapter, mtype, maddr,
3788 size, data);
3789 if (ret == 0 && resid != 0) {
3790 union {
3791 __be32 word;
3792 char buf[4];
3793 } last;
3794 int i;
3795
3796 last.word = data[size >> 2];
3797 for (i = resid; i < 4; i++)
3798 last.buf[i] = 0;
3799 ret = t4_memory_write(adapter, mtype,
3800 maddr + size,
3801 4, &last.word);
3802 }
3803 }
3804 }
3805
3806 release_firmware(cf);
3807 if (ret)
3808 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003809 }
3810
Vipul Pandya636f9d32012-09-26 02:39:39 +00003811 /*
3812 * Issue a Capability Configuration command to the firmware to get it
3813 * to parse the Configuration File. We don't use t4_fw_config_file()
3814 * because we want the ability to modify various features after we've
3815 * processed the configuration file ...
3816 */
3817 memset(&caps_cmd, 0, sizeof(caps_cmd));
3818 caps_cmd.op_to_write =
3819 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3820 FW_CMD_REQUEST |
3821 FW_CMD_READ);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05303822 caps_cmd.cfvalid_to_len16 =
Vipul Pandya636f9d32012-09-26 02:39:39 +00003823 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
3824 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3825 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
3826 FW_LEN16(caps_cmd));
3827 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3828 &caps_cmd);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003829 if (ret < 0)
3830 goto bye;
3831
Vipul Pandya636f9d32012-09-26 02:39:39 +00003832 finiver = ntohl(caps_cmd.finiver);
3833 finicsum = ntohl(caps_cmd.finicsum);
3834 cfcsum = ntohl(caps_cmd.cfcsum);
3835 if (finicsum != cfcsum)
3836 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
3837 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3838 finicsum, cfcsum);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003839
Vipul Pandya636f9d32012-09-26 02:39:39 +00003840 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00003841 * And now tell the firmware to use the configuration we just loaded.
3842 */
3843 caps_cmd.op_to_write =
3844 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3845 FW_CMD_REQUEST |
3846 FW_CMD_WRITE);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05303847 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
Vipul Pandya636f9d32012-09-26 02:39:39 +00003848 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3849 NULL);
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00003850 if (ret < 0)
3851 goto bye;
3852
Vipul Pandya636f9d32012-09-26 02:39:39 +00003853 /*
3854 * Tweak configuration based on system architecture, module
3855 * parameters, etc.
3856 */
3857 ret = adap_init0_tweaks(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003858 if (ret < 0)
3859 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003860
Vipul Pandya636f9d32012-09-26 02:39:39 +00003861 /*
3862 * And finally tell the firmware to initialize itself using the
3863 * parameters from the Configuration File.
3864 */
3865 ret = t4_fw_initialize(adapter, adapter->mbox);
3866 if (ret < 0)
3867 goto bye;
3868
3869 /*
3870 * Return successfully and note that we're operating with parameters
3871 * not supplied by the driver, rather than from hard-wired
3872 * initialization constants buried in the driver.
3873 */
3874 adapter->flags |= USING_SOFT_PARAMS;
3875 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
3876 "Configuration File %s, version %#x, computed checksum %#x\n",
3877 (using_flash
3878 ? "in device FLASH"
3879 : "/lib/firmware/" FW_CFNAME),
3880 finiver, cfcsum);
3881 return 0;
3882
3883 /*
3884 * Something bad happened. Return the error ... (If the "error"
3885 * is that there's no Configuration File on the adapter we don't
3886 * want to issue a warning since this is fairly common.)
3887 */
3888bye:
3889 if (ret != -ENOENT)
3890 dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
3891 -ret);
3892 return ret;
3893}
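/*
 * Usage note (hypothetical host-side steps, not part of this driver): to
 * override the Configuration File stored in flash, drop a file at
 * /lib/firmware/<FW_CFNAME> (e.g. /lib/firmware/cxgb4/t4-config.txt) and
 * reload the driver. request_firmware() above then picks up the host copy,
 * writes it into adapter memory, and the firmware initializes from it.
 */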
3894
3895/*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00003896 * Attempt to initialize the adapter via hard-coded, driver supplied
3897 * parameters ...
3898 */
3899static int adap_init0_no_config(struct adapter *adapter, int reset)
3900{
3901 struct sge *s = &adapter->sge;
3902 struct fw_caps_config_cmd caps_cmd;
3903 u32 v;
3904 int i, ret;
3905
3906 /*
3907 * Reset device if necessary
3908 */
3909 if (reset) {
3910 ret = t4_fw_reset(adapter, adapter->mbox,
3911 PIORSTMODE | PIORST);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003912 if (ret < 0)
3913 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003914 }
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00003915
Vipul Pandya13ee15d2012-09-26 02:39:40 +00003916 /*
3917 * Get device capabilities and select which we'll be using.
3918 */
3919 memset(&caps_cmd, 0, sizeof(caps_cmd));
3920 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3921 FW_CMD_REQUEST | FW_CMD_READ);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05303922 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
Vipul Pandya13ee15d2012-09-26 02:39:40 +00003923 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3924 &caps_cmd);
3925 if (ret < 0)
3926 goto bye;
3927
Vipul Pandya13ee15d2012-09-26 02:39:40 +00003928 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
3929 if (!vf_acls)
3930 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
3931 else
3932 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
3933 } else if (vf_acls) {
3934 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
 ret = -EINVAL;
3935 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003936 }
Vipul Pandya13ee15d2012-09-26 02:39:40 +00003937 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3938 FW_CMD_REQUEST | FW_CMD_WRITE);
3939 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3940 NULL);
3941 if (ret < 0)
3942 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003943
Vipul Pandya13ee15d2012-09-26 02:39:40 +00003944 /*
3945 * Tweak configuration based on system architecture, module
3946 * parameters, etc.
3947 */
3948 ret = adap_init0_tweaks(adapter);
3949 if (ret < 0)
3950 goto bye;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00003951
Vipul Pandya13ee15d2012-09-26 02:39:40 +00003952 /*
3953 * Select RSS Global Mode we want to use. We use "Basic Virtual"
3954 * mode which maps each Virtual Interface to its own section of
3955 * the RSS Table and we turn on all map and hash enables ...
3956 */
3957 adapter->flags |= RSS_TNLALLLOOKUP;
3958 ret = t4_config_glbl_rss(adapter, adapter->mbox,
3959 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3960 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
3961 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
3962 ((adapter->flags & RSS_TNLALLLOOKUP) ?
3963 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
3964 if (ret < 0)
3965 goto bye;
3966
3967 /*
3968 * Set up our own fundamental resource provisioning ...
3969 */
3970 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
3971 PFRES_NEQ, PFRES_NETHCTRL,
3972 PFRES_NIQFLINT, PFRES_NIQ,
3973 PFRES_TC, PFRES_NVI,
3974 FW_PFVF_CMD_CMASK_MASK,
3975 pfvfres_pmask(adapter, adapter->fn, 0),
3976 PFRES_NEXACTF,
3977 PFRES_R_CAPS, PFRES_WX_CAPS);
3978 if (ret < 0)
3979 goto bye;
3980
3981 /*
3982 * Perform low level SGE initialization. We need to do this before we
3983 * send the firmware the INITIALIZE command because that will cause
3984 * any other PF Drivers which are waiting for the Master
3985 * Initialization to proceed forward.
3986 */
3987 for (i = 0; i < SGE_NTIMERS - 1; i++)
3988 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
3989 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
3990 s->counter_val[0] = 1;
3991 for (i = 1; i < SGE_NCOUNTERS; i++)
3992 s->counter_val[i] = min(intr_cnt[i - 1],
3993 THRESHOLD_0_GET(THRESHOLD_0_MASK));
3994 t4_sge_init(adapter);
Casey Leedom7ee9ff92010-06-25 12:11:46 +00003995
3996#ifdef CONFIG_PCI_IOV
3997 /*
3998 * Provision resource limits for Virtual Functions. We currently
3999 * grant them all the same static resource limits except for the Port
4000 * Access Rights Mask which we're assigning based on the PF. All of
4001 * the static provisioning stuff for both the PF and VF really needs
4002 * to be managed in a persistent manner for each device which the
4003 * firmware controls.
4004 */
4005 {
4006 int pf, vf;
4007
4008 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
4009 if (num_vf[pf] <= 0)
4010 continue;
4011
4012 /* VF numbering starts at 1! */
4013 for (vf = 1; vf <= num_vf[pf]; vf++) {
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004014 ret = t4_cfg_pfvf(adapter, adapter->mbox,
4015 pf, vf,
Casey Leedom7ee9ff92010-06-25 12:11:46 +00004016 VFRES_NEQ, VFRES_NETHCTRL,
4017 VFRES_NIQFLINT, VFRES_NIQ,
4018 VFRES_TC, VFRES_NVI,
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004019 FW_PFVF_CMD_CMASK_GET(
4020 FW_PFVF_CMD_CMASK_MASK),
4021 pfvfres_pmask(
4022 adapter, pf, vf),
Casey Leedom7ee9ff92010-06-25 12:11:46 +00004023 VFRES_NEXACTF,
4024 VFRES_R_CAPS, VFRES_WX_CAPS);
4025 if (ret < 0)
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004026 dev_warn(adapter->pdev_dev,
4027 "failed to "\
Casey Leedom7ee9ff92010-06-25 12:11:46 +00004028 "provision pf/vf=%d/%d; "
4029 "err=%d\n", pf, vf, ret);
4030 }
4031 }
4032 }
4033#endif
4034
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004035 /*
4036 * Set up the default filter mode. Later we'll want to implement this
4037 * via a firmware command, etc. ... This needs to be done before the
4038 * firmware initialization command ... If the selected set of fields
4039 * isn't equal to the default value, we'll need to make sure that the
4040 * field selections will fit in the 36-bit budget.
4041 */
4042 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
Vipul Pandya404d9e32012-10-08 02:59:43 +00004043 int j, bits = 0;
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004044
Vipul Pandya404d9e32012-10-08 02:59:43 +00004045 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
4046 switch (tp_vlan_pri_map & (1 << j)) {
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004047 case 0:
4048 /* compressed filter field not enabled */
4049 break;
4050 case FCOE_MASK:
4051 bits += 1;
4052 break;
4053 case PORT_MASK:
4054 bits += 3;
4055 break;
4056 case VNIC_ID_MASK:
4057 bits += 17;
4058 break;
4059 case VLAN_MASK:
4060 bits += 17;
4061 break;
4062 case TOS_MASK:
4063 bits += 8;
4064 break;
4065 case PROTOCOL_MASK:
4066 bits += 8;
4067 break;
4068 case ETHERTYPE_MASK:
4069 bits += 16;
4070 break;
4071 case MACMATCH_MASK:
4072 bits += 9;
4073 break;
4074 case MPSHITTYPE_MASK:
4075 bits += 3;
4076 break;
4077 case FRAGMENTATION_MASK:
4078 bits += 1;
4079 break;
4080 }
4081
4082 if (bits > 36) {
4083 dev_err(adapter->pdev_dev,
4084 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
4085 " using %#x\n", tp_vlan_pri_map, bits,
4086 TP_VLAN_PRI_MAP_DEFAULT);
4087 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
4088 }
4089 }
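/*
 * Worked example (hypothetical tp_vlan_pri_map choice): selecting
 * ETHERTYPE (16) + VLAN (17) + PORT (3) needs exactly 36 bits and fits
 * the budget; adding MACMATCH (9 more, 45 total) would trip the check
 * above and fall back to TP_VLAN_PRI_MAP_DEFAULT.
 */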
4090 v = tp_vlan_pri_map;
4091 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
4092 &v, 1, TP_VLAN_PRI_MAP);
4093
4094 /*
4095 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
4096 * to support any of the compressed filter fields above. Newer
4097 * versions of the firmware do this automatically but it doesn't hurt
4098 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
4099 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
4100 * since the firmware automatically turns this on and off when we have
4101 * a non-zero number of filters active (since it does have a
4102 * performance impact).
4103 */
4104 if (tp_vlan_pri_map)
4105 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
4106 FIVETUPLELOOKUP_MASK,
4107 FIVETUPLELOOKUP_MASK);
4108
4109 /*
4110 * Tweak some settings.
4111 */
4112 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
4113 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
4114 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
4115 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
4116
4117 /*
4118 * Get basic stuff going by issuing the Firmware Initialize command.
4119 * Note that this _must_ be after all PFVF commands ...
4120 */
4121 ret = t4_fw_initialize(adapter, adapter->mbox);
4122 if (ret < 0)
4123 goto bye;
4124
4125 /*
4126 * Return successfully!
4127 */
4128 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
4129 "driver parameters\n");
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004130 return 0;
4131
4132 /*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004133 * Something bad happened. Return the error ...
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004134 */
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004135bye:
4136 return ret;
4137}
4138
4139/*
Vipul Pandya636f9d32012-09-26 02:39:39 +00004140 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004141 */
4142static int adap_init0(struct adapter *adap)
4143{
4144 int ret;
4145 u32 v, port_vec;
4146 enum dev_state state;
4147 u32 params[7], val[7];
Vipul Pandya9a4da2c2012-10-19 02:09:53 +00004148 struct fw_caps_config_cmd caps_cmd;
Vipul Pandya636f9d32012-09-26 02:39:39 +00004149 int reset = 1, j;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004150
Vipul Pandya636f9d32012-09-26 02:39:39 +00004151 /*
4152 * Contact FW, advertising Master capability (and potentially forcing
4153 * ourselves as the Master PF if our module parameter force_init is
4154 * set).
4155 */
4156 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
4157 force_init ? MASTER_MUST : MASTER_MAY,
4158 &state);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004159 if (ret < 0) {
4160 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4161 ret);
4162 return ret;
4163 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00004164 if (ret == adap->mbox)
4165 adap->flags |= MASTER_PF;
4166 if (force_init && state == DEV_STATE_INIT)
4167 state = DEV_STATE_UNINIT;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004168
Vipul Pandya636f9d32012-09-26 02:39:39 +00004169 /*
4170 * If we're the Master PF Driver and the device is uninitialized,
4171 * then let's consider upgrading the firmware ... (We always want
4172 * to check the firmware version number in order to A. get it for
4173 * later reporting and B. to warn if the currently loaded firmware
4174 * is excessively mismatched relative to the driver.)
4175 */
4176 ret = t4_check_fw_version(adap);
4177 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
4178 if (ret == -EINVAL || ret > 0) {
4179 if (upgrade_fw(adap) >= 0) {
4180 /*
4181 * Note that the chip was reset as part of the
4182 * firmware upgrade so we don't reset it again
4183 * below and grab the new firmware version.
4184 */
4185 reset = 0;
4186 ret = t4_check_fw_version(adap);
4187 }
4188 }
4189 if (ret < 0)
4190 return ret;
4191 }
4192
4193 /*
4194 * Grab VPD parameters. This should be done after we establish a
4195 * connection to the firmware since some of the VPD parameters
4196 * (notably the Core Clock frequency) are retrieved via requests to
4197 * the firmware. On the other hand, we need these fairly early on
4198 * so we do this right after getting ahold of the firmware.
4199 */
4200 ret = get_vpd_params(adap, &adap->params.vpd);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004201 if (ret < 0)
4202 goto bye;
4203
Vipul Pandya636f9d32012-09-26 02:39:39 +00004204 /*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004205 * Find out what ports are available to us. Note that we need to do
4206 * this before calling adap_init0_no_config() since it needs nports
4207 * and portvec ...
Vipul Pandya636f9d32012-09-26 02:39:39 +00004208 */
4209 v =
4210 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4211 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
4212 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
4213 if (ret < 0)
4214 goto bye;
4215
4216 adap->params.nports = hweight32(port_vec);
4217 adap->params.portvec = port_vec;
4218
4219 /*
4220 * If the firmware is initialized already (and we're not forcing a
4221 * master initialization), note that we're living with existing
4222 * adapter parameters. Otherwise, it's time to try initializing the
4223 * adapter ...
4224 */
4225 if (state == DEV_STATE_INIT) {
4226 dev_info(adap->pdev_dev, "Coming up as %s: "\
4227 "Adapter already initialized\n",
4228 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
4229 adap->flags |= USING_SOFT_PARAMS;
4230 } else {
4231 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4232 "Initializing adapter\n");
Vipul Pandya636f9d32012-09-26 02:39:39 +00004233
4234 /*
4235 * If the firmware doesn't support Configuration
4236 * Files, warn the user; hard-coded initialization is used below.
4237 */
4238 if (ret < 0)
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004239 dev_warn(adap->pdev_dev, "Firmware doesn't support "
Vipul Pandya636f9d32012-09-26 02:39:39 +00004240 "configuration file.\n");
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004241 if (force_old_init)
4242 ret = adap_init0_no_config(adap, reset);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004243 else {
4244 /*
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004245 * Find out whether we're dealing with a version of
4246 * the firmware which has configuration file support.
Vipul Pandya636f9d32012-09-26 02:39:39 +00004247 */
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004248 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4249 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4250 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
4251 params, val);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004252
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004253 /*
4254 * If the firmware doesn't support Configuration
4255 * Files, use the old Driver-based, hard-wired
4256 * initialization. Otherwise, try using the
4257 * Configuration File support and fall back to the
4258 * Driver-based initialization if there's no
4259 * Configuration File found.
4260 */
4261 if (ret < 0)
4262 ret = adap_init0_no_config(adap, reset);
4263 else {
4264 /*
4265 * The firmware provides us with a memory
4266 * buffer where we can load a Configuration
4267 * File from the host if we want to override
4268 * the Configuration File in flash.
4269 */
4270
4271 ret = adap_init0_config(adap, reset);
4272 if (ret == -ENOENT) {
4273 dev_info(adap->pdev_dev,
4274 "No Configuration File present "
4275 "on adapter. Using hard-wired "
4276 "configuration parameters.\n");
4277 ret = adap_init0_no_config(adap, reset);
4278 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00004279 }
4280 }
4281 if (ret < 0) {
4282 dev_err(adap->pdev_dev,
4283 "could not initialize adapter, error %d\n",
4284 -ret);
4285 goto bye;
4286 }
4287 }
4288
4289 /*
4290 * If we're living with non-hard-coded parameters (either from a
4291 * Firmware Configuration File or values programmed by a different PF
4292 * Driver), give the SGE code a chance to pull in anything that it
4293 * needs ... Note that this must be called after we retrieve our VPD
4294 * parameters in order to know how to convert core ticks to seconds.
4295 */
4296 if (adap->flags & USING_SOFT_PARAMS) {
4297 ret = t4_sge_init(adap);
4298 if (ret < 0)
4299 goto bye;
4300 }
4301
Vipul Pandya9a4da2c2012-10-19 02:09:53 +00004302 if (is_bypass_device(adap->pdev->device))
4303 adap->params.bypass = 1;
4304
Vipul Pandya636f9d32012-09-26 02:39:39 +00004305 /*
4306 * Grab some of our basic fundamental operating parameters.
4307 */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004308#define FW_PARAM_DEV(param) \
4309 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
Vipul Pandya636f9d32012-09-26 02:39:39 +00004310 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004311
4312#define FW_PARAM_PFVF(param) \
Vipul Pandya636f9d32012-09-26 02:39:39 +00004313 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
4314 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
4315 FW_PARAMS_PARAM_Y(0) | \
4316 FW_PARAMS_PARAM_Z(0)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004317
Vipul Pandya636f9d32012-09-26 02:39:39 +00004318 params[0] = FW_PARAM_PFVF(EQ_START);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004319 params[1] = FW_PARAM_PFVF(L2T_START);
4320 params[2] = FW_PARAM_PFVF(L2T_END);
4321 params[3] = FW_PARAM_PFVF(FILTER_START);
4322 params[4] = FW_PARAM_PFVF(FILTER_END);
4323 params[5] = FW_PARAM_PFVF(IQFLINT_START);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004324 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004325 if (ret < 0)
4326 goto bye;
Vipul Pandya636f9d32012-09-26 02:39:39 +00004327 adap->sge.egr_start = val[0];
4328 adap->l2t_start = val[1];
4329 adap->l2t_end = val[2];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004330 adap->tids.ftid_base = val[3];
4331 adap->tids.nftids = val[4] - val[3] + 1;
4332 adap->sge.ingr_start = val[5];
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004333
Vipul Pandya636f9d32012-09-26 02:39:39 +00004334 /* query params related to active filter region */
4335 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
4336 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
4337 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
4338 /* If an Active filter region is provisioned, we enable establishing
4339 * offload connections through firmware work requests.
4340 */
4341 if ((val[0] != val[1]) && (ret >= 0)) {
4342 adap->flags |= FW_OFLD_CONN;
4343 adap->tids.aftid_base = val[0];
4344 adap->tids.aftid_end = val[1];
4345 }
4346
Vipul Pandya636f9d32012-09-26 02:39:39 +00004347 /*
4348 * Get device capabilities so we can determine what resources we need
4349 * to manage.
4350 */
4351 memset(&caps_cmd, 0, sizeof(caps_cmd));
Vipul Pandya9a4da2c2012-10-19 02:09:53 +00004352 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004353 FW_CMD_REQUEST | FW_CMD_READ);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05304354 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
Vipul Pandya636f9d32012-09-26 02:39:39 +00004355 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
4356 &caps_cmd);
4357 if (ret < 0)
4358 goto bye;
4359
Vipul Pandya13ee15d2012-09-26 02:39:40 +00004360 if (caps_cmd.ofldcaps) {
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004361 /* query offload-related parameters */
4362 params[0] = FW_PARAM_DEV(NTID);
4363 params[1] = FW_PARAM_PFVF(SERVER_START);
4364 params[2] = FW_PARAM_PFVF(SERVER_END);
4365 params[3] = FW_PARAM_PFVF(TDDP_START);
4366 params[4] = FW_PARAM_PFVF(TDDP_END);
4367 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004368 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
4369 params, val);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004370 if (ret < 0)
4371 goto bye;
4372 adap->tids.ntids = val[0];
4373 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
4374 adap->tids.stid_base = val[1];
4375 adap->tids.nstids = val[2] - val[1] + 1;
Vipul Pandya636f9d32012-09-26 02:39:39 +00004376 /*
4377 * Set up the server filter region. Divide the available filter
4378 * region into two parts. Regular filters get 1/3rd and server
4379 * filters get the remaining 2/3rd. This is only enabled if the
4380 * workaround path is enabled.
4381 * 1. For regular filters.
4382 * 2. Server filters: these are special filters which are used
4383 * to redirect SYN packets to the offload queue.
4384 */
4385 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
4386 adap->tids.sftid_base = adap->tids.ftid_base +
4387 DIV_ROUND_UP(adap->tids.nftids, 3);
4388 adap->tids.nsftids = adap->tids.nftids -
4389 DIV_ROUND_UP(adap->tids.nftids, 3);
4390 adap->tids.nftids = adap->tids.sftid_base -
4391 adap->tids.ftid_base;
4392 }
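/*
 * Worked example (illustrative numbers): with nftids = 496,
 * DIV_ROUND_UP(496, 3) = 166, so sftid_base = ftid_base + 166,
 * nsftids = 496 - 166 = 330 server filters, and the regular filter
 * count shrinks to sftid_base - ftid_base = 166.
 */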
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004393 adap->vres.ddp.start = val[3];
4394 adap->vres.ddp.size = val[4] - val[3] + 1;
4395 adap->params.ofldq_wr_cred = val[5];
Vipul Pandya636f9d32012-09-26 02:39:39 +00004396
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004397 adap->params.offload = 1;
4398 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00004399 if (caps_cmd.rdmacaps) {
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004400 params[0] = FW_PARAM_PFVF(STAG_START);
4401 params[1] = FW_PARAM_PFVF(STAG_END);
4402 params[2] = FW_PARAM_PFVF(RQ_START);
4403 params[3] = FW_PARAM_PFVF(RQ_END);
4404 params[4] = FW_PARAM_PFVF(PBL_START);
4405 params[5] = FW_PARAM_PFVF(PBL_END);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004406 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
4407 params, val);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004408 if (ret < 0)
4409 goto bye;
4410 adap->vres.stag.start = val[0];
4411 adap->vres.stag.size = val[1] - val[0] + 1;
4412 adap->vres.rq.start = val[2];
4413 adap->vres.rq.size = val[3] - val[2] + 1;
4414 adap->vres.pbl.start = val[4];
4415 adap->vres.pbl.size = val[5] - val[4] + 1;
4416
4417 params[0] = FW_PARAM_PFVF(SQRQ_START);
4418 params[1] = FW_PARAM_PFVF(SQRQ_END);
4419 params[2] = FW_PARAM_PFVF(CQ_START);
4420 params[3] = FW_PARAM_PFVF(CQ_END);
4421 params[4] = FW_PARAM_PFVF(OCQ_START);
4422 params[5] = FW_PARAM_PFVF(OCQ_END);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004423 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004424 if (ret < 0)
4425 goto bye;
4426 adap->vres.qp.start = val[0];
4427 adap->vres.qp.size = val[1] - val[0] + 1;
4428 adap->vres.cq.start = val[2];
4429 adap->vres.cq.size = val[3] - val[2] + 1;
4430 adap->vres.ocq.start = val[4];
4431 adap->vres.ocq.size = val[5] - val[4] + 1;
4432 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00004433 if (caps_cmd.iscsicaps) {
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004434 params[0] = FW_PARAM_PFVF(ISCSI_START);
4435 params[1] = FW_PARAM_PFVF(ISCSI_END);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004436 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
4437 params, val);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004438 if (ret < 0)
4439 goto bye;
4440 adap->vres.iscsi.start = val[0];
4441 adap->vres.iscsi.size = val[1] - val[0] + 1;
4442 }
4443#undef FW_PARAM_PFVF
4444#undef FW_PARAM_DEV
4445
Vipul Pandya636f9d32012-09-26 02:39:39 +00004446 /*
4447 * These are finalized by FW initialization, load their values now.
4448 */
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004449 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
4450 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004451 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004452 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
4453 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4454 adap->params.b_wnd);
4455
Vipul Pandya636f9d32012-09-26 02:39:39 +00004456 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
4457 for (j = 0; j < NCHAN; j++)
4458 adap->params.tp.tx_modq[j] = j;
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004459
Vipul Pandya793dad92012-12-10 09:30:56 +00004460 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4461 &adap->filter_mode, 1,
4462 TP_VLAN_PRI_MAP);
4463
Vipul Pandya636f9d32012-09-26 02:39:39 +00004464 adap->flags |= FW_OK;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004465 return 0;
4466
4467 /*
Vipul Pandya636f9d32012-09-26 02:39:39 +00004468 * Something bad happened. If a command timed out or failed with EIO,
4469 * the FW is not operating within its spec or something catastrophic
4470 * happened to the HW/FW; stop issuing commands.
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004471 */
Vipul Pandya636f9d32012-09-26 02:39:39 +00004472bye:
4473 if (ret != -ETIMEDOUT && ret != -EIO)
4474 t4_fw_bye(adap, adap->mbox);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004475 return ret;
4476}
4477
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004478/* EEH callbacks */
4479
4480static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
4481 pci_channel_state_t state)
4482{
4483 int i;
4484 struct adapter *adap = pci_get_drvdata(pdev);
4485
4486 if (!adap)
4487 goto out;
4488
4489 rtnl_lock();
4490 adap->flags &= ~FW_OK;
4491 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
4492 for_each_port(adap, i) {
4493 struct net_device *dev = adap->port[i];
4494
4495 netif_device_detach(dev);
4496 netif_carrier_off(dev);
4497 }
4498 if (adap->flags & FULL_INIT_DONE)
4499 cxgb_down(adap);
4500 rtnl_unlock();
4501 pci_disable_device(pdev);
4502out: return state == pci_channel_io_perm_failure ?
4503 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
4504}
4505
4506static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
4507{
4508 int i, ret;
4509 struct fw_caps_config_cmd c;
4510 struct adapter *adap = pci_get_drvdata(pdev);
4511
4512 if (!adap) {
4513 pci_restore_state(pdev);
4514 pci_save_state(pdev);
4515 return PCI_ERS_RESULT_RECOVERED;
4516 }
4517
4518 if (pci_enable_device(pdev)) {
4519 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
4520 return PCI_ERS_RESULT_DISCONNECT;
4521 }
4522
4523 pci_set_master(pdev);
4524 pci_restore_state(pdev);
4525 pci_save_state(pdev);
4526 pci_cleanup_aer_uncorrect_error_status(pdev);
4527
4528 if (t4_wait_dev_ready(adap) < 0)
4529 return PCI_ERS_RESULT_DISCONNECT;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004530 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004531 return PCI_ERS_RESULT_DISCONNECT;
4532 adap->flags |= FW_OK;
4533 if (adap_init1(adap, &c))
4534 return PCI_ERS_RESULT_DISCONNECT;
4535
4536 for_each_port(adap, i) {
4537 struct port_info *p = adap2pinfo(adap, i);
4538
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004539 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
4540 NULL, NULL);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004541 if (ret < 0)
4542 return PCI_ERS_RESULT_DISCONNECT;
4543 p->viid = ret;
4544 p->xact_addr_filt = -1;
4545 }
4546
4547 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4548 adap->params.b_wnd);
Dimitris Michailidis1ae970e2010-08-02 13:19:19 +00004549 setup_memwin(adap);
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004550 if (cxgb_up(adap))
4551 return PCI_ERS_RESULT_DISCONNECT;
4552 return PCI_ERS_RESULT_RECOVERED;
4553}
4554
4555static void eeh_resume(struct pci_dev *pdev)
4556{
4557 int i;
4558 struct adapter *adap = pci_get_drvdata(pdev);
4559
4560 if (!adap)
4561 return;
4562
4563 rtnl_lock();
4564 for_each_port(adap, i) {
4565 struct net_device *dev = adap->port[i];
4566
4567 if (netif_running(dev)) {
4568 link_start(dev);
4569 cxgb_set_rxmode(dev);
4570 }
4571 netif_device_attach(dev);
4572 }
4573 rtnl_unlock();
4574}
4575
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004576static const struct pci_error_handlers cxgb4_eeh = {
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004577 .error_detected = eeh_err_detected,
4578 .slot_reset = eeh_slot_reset,
4579 .resume = eeh_resume,
4580};
4581
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004582static inline bool is_10g_port(const struct link_config *lc)
4583{
4584 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
4585}
4586
4587static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
4588 unsigned int size, unsigned int iqe_size)
4589{
4590 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
4591 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
4592 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
4593 q->iqe_len = iqe_size;
4594 q->size = size;
4595}
4596
4597/*
4598 * Perform default configuration of DMA queues depending on the number and type
4599 * of ports we found and the number of available CPUs. Most settings can be
4600 * modified by the admin prior to actual use.
4601 */
Bill Pemberton91744942012-12-03 09:23:02 -05004602static void cfg_queues(struct adapter *adap)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004603{
4604 struct sge *s = &adap->sge;
4605 int i, q10g = 0, n10g = 0, qidx = 0;
4606
4607 for_each_port(adap, i)
4608 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
4609
4610 /*
4611 * We default to 1 queue per non-10G port and up to as many queues as
4612 * there are CPU cores per 10G port.
4613 */
4614 if (n10g)
4615 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
Yuval Mintz5952dde2012-07-01 03:18:55 +00004616 if (q10g > netif_get_num_default_rss_queues())
4617 q10g = netif_get_num_default_rss_queues();
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004618
4619 for_each_port(adap, i) {
4620 struct port_info *pi = adap2pinfo(adap, i);
4621
4622 pi->first_qset = qidx;
4623 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
4624 qidx += pi->nqsets;
4625 }
4626
4627 s->ethqsets = qidx;
4628 s->max_ethqsets = qidx; /* MSI-X may lower it later */
4629
4630 if (is_offload(adap)) {
4631 /*
4632 * For offload we use 1 queue/channel if all ports are up to 1G,
4633 * otherwise we divide all available queues amongst the channels
4634 * capped by the number of available cores.
4635 */
4636 if (n10g) {
4637 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
4638 num_online_cpus());
4639 s->ofldqsets = roundup(i, adap->params.nports);
4640 } else
4641 s->ofldqsets = adap->params.nports;
4642 /* For RDMA one Rx queue per channel suffices */
4643 s->rdmaqs = adap->params.nports;
4644 }
4645
4646 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4647 struct sge_eth_rxq *r = &s->ethrxq[i];
4648
4649 init_rspq(&r->rspq, 0, 0, 1024, 64);
4650 r->fl.size = 72;
4651 }
4652
4653 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4654 s->ethtxq[i].q.size = 1024;
4655
4656 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4657 s->ctrlq[i].q.size = 512;
4658
4659 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
4660 s->ofldtxq[i].q.size = 1024;
4661
4662 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
4663 struct sge_ofld_rxq *r = &s->ofldrxq[i];
4664
4665 init_rspq(&r->rspq, 0, 0, 1024, 64);
4666 r->rspq.uld = CXGB4_ULD_ISCSI;
4667 r->fl.size = 72;
4668 }
4669
4670 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
4671 struct sge_ofld_rxq *r = &s->rdmarxq[i];
4672
4673 init_rspq(&r->rspq, 0, 0, 511, 64);
4674 r->rspq.uld = CXGB4_ULD_RDMA;
4675 r->fl.size = 72;
4676 }
4677
4678 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
4679 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
4680}
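/*
 * Worked example (illustrative, assuming MAX_ETH_QSETS is 32): a 2-port
 * all-10G adapter with 8 online CPUs gives q10g = (32 - 0) / 2 = 16,
 * capped to netif_get_num_default_rss_queues() (8 here), so each port
 * gets 8 queue sets and s->ethqsets = 16. An MSI-X shortage can lower
 * this later via reduce_ethqs().
 */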
4681
4682/*
4683 * Reduce the number of Ethernet queues across all ports to at most n.
4684 * n provides at least one queue per port.
4685 */
Bill Pemberton91744942012-12-03 09:23:02 -05004686static void reduce_ethqs(struct adapter *adap, int n)
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004687{
4688 int i;
4689 struct port_info *pi;
4690
4691 while (n < adap->sge.ethqsets)
4692 for_each_port(adap, i) {
4693 pi = adap2pinfo(adap, i);
4694 if (pi->nqsets > 1) {
4695 pi->nqsets--;
4696 adap->sge.ethqsets--;
4697 if (adap->sge.ethqsets <= n)
4698 break;
4699 }
4700 }
4701
4702 n = 0;
4703 for_each_port(adap, i) {
4704 pi = adap2pinfo(adap, i);
4705 pi->first_qset = n;
4706 n += pi->nqsets;
4707 }
4708}
4709
4710/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
4711#define EXTRA_VECS 2
4712
static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, err, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

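	/*
	 * pci_enable_msix() returns 0 on success, a negative errno on
	 * failure, or, if fewer vectors are available than requested, the
	 * available count; retry with that smaller count as long as it
	 * still covers our minimum.
	 */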
	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
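		/*
		 * Whatever remains after the NIC queues goes to the offload
		 * group: nchan vectors are reserved for the per-channel RDMA
		 * queues (ofld_need - nchan == nchan), and the iSCSI/offload
		 * queue count is rounded down to a whole number of channels.
		 */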
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;  /* round down */
		}
		for (i = 0; i < want; ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(adap->pdev_dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}

#undef EXTRA_VECS

static int init_rss(struct adapter *adap)
{
	unsigned int i, j;

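	/*
	 * Give each port its own RSS indirection table, initially spreading
	 * entries evenly across the port's queue sets:
	 * ethtool_rxfh_indir_default(j, n) is simply j % n.
	 */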
	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}

static void print_port_info(const struct net_device *dev)
{
	static const char *base[] = {
		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
	};

	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

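	/*
	 * The speed bits and the base[] suffix combine into a link-type
	 * string such as "10GBASE-R SFP+" (the --bufp below backs over the
	 * trailing '/' so the final sprintf appends "BASE-..." cleanly).
	 * A full line then looks something like
	 * "Chelsio T420-CR rev 2 10GBASE-R SFP+ RNIC PCIe x8 MSI-X"
	 * (illustrative, not taken from real output).
	 */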
	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", base[pi->port_type]);

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id, adap->params.rev, buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, E/C: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.ec);
}

static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
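	/*
	 * ent->driver_data names that PF; probes of the adapter's other
	 * physical functions skip straight to the SR-IOV provisioning at
	 * the sriov: label below.
	 */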
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004865 func = PCI_FUNC(pdev->devfn);
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004866 if (func != ent->driver_data) {
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004867 pci_save_state(pdev); /* to restore SR-IOV later */
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004868 goto sriov;
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00004869 }
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004870
4871 err = pci_enable_device(pdev);
4872 if (err) {
4873 dev_err(&pdev->dev, "cannot enable PCI device\n");
4874 goto out_release_regions;
4875 }
4876
4877 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004878 highdma = true;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004879 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4880 if (err) {
4881 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
4882 "coherent allocations\n");
4883 goto out_disable_device;
4884 }
4885 } else {
4886 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4887 if (err) {
4888 dev_err(&pdev->dev, "no usable DMA configuration\n");
4889 goto out_disable_device;
4890 }
4891 }
4892
4893 pci_enable_pcie_error_reporting(pdev);
Dimitris Michailidisef306b52010-12-14 21:36:44 +00004894 enable_pcie_relaxed_ordering(pdev);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004895 pci_set_master(pdev);
4896 pci_save_state(pdev);
4897
4898 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
4899 if (!adapter) {
4900 err = -ENOMEM;
4901 goto out_disable_device;
4902 }
4903
4904 adapter->regs = pci_ioremap_bar(pdev, 0);
4905 if (!adapter->regs) {
4906 dev_err(&pdev->dev, "cannot map device registers\n");
4907 err = -ENOMEM;
4908 goto out_free_adapter;
4909 }
4910
4911 adapter->pdev = pdev;
4912 adapter->pdev_dev = &pdev->dev;
Vipul Pandya3069ee9b2012-05-18 15:29:26 +05304913 adapter->mbox = func;
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004914 adapter->fn = func;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004915 adapter->msg_enable = dflt_msg_enable;
4916 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
4917
4918 spin_lock_init(&adapter->stats_lock);
4919 spin_lock_init(&adapter->tid_release_lock);
4920
4921 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
Vipul Pandya881806b2012-05-18 15:29:24 +05304922 INIT_WORK(&adapter->db_full_task, process_db_full);
4923 INIT_WORK(&adapter->db_drop_task, process_db_drop);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004924
4925 err = t4_prep_adapter(adapter);
4926 if (err)
4927 goto out_unmap_bar;
Vipul Pandya636f9d32012-09-26 02:39:39 +00004928 setup_memwin(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004929 err = adap_init0(adapter);
Vipul Pandya636f9d32012-09-26 02:39:39 +00004930 setup_memwin_rdma(adapter);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004931 if (err)
4932 goto out_unmap_bar;
4933
4934 for_each_port(adapter, i) {
4935 struct net_device *netdev;
4936
4937 netdev = alloc_etherdev_mq(sizeof(struct port_info),
4938 MAX_ETH_QSETS);
4939 if (!netdev) {
4940 err = -ENOMEM;
4941 goto out_free_dev;
4942 }
4943
4944 SET_NETDEV_DEV(netdev, &pdev->dev);
4945
4946 adapter->port[i] = netdev;
4947 pi = netdev_priv(netdev);
4948 pi->adapter = adapter;
4949 pi->xact_addr_filt = -1;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004950 pi->port_id = i;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004951 netdev->irq = pdev->irq;
4952
Michał Mirosław2ed28ba2011-04-16 13:05:08 +00004953 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
4954 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4955 NETIF_F_RXCSUM | NETIF_F_RXHASH |
4956 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004957 if (highdma)
4958 netdev->hw_features |= NETIF_F_HIGHDMA;
4959 netdev->features |= netdev->hw_features;
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004960 netdev->vlan_features = netdev->features & VLAN_FEAT;
4961
Jiri Pirko01789342011-08-16 06:29:00 +00004962 netdev->priv_flags |= IFF_UNICAST_FLT;
4963
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004964 netdev->netdev_ops = &cxgb4_netdev_ops;
4965 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
4966 }
4967
4968 pci_set_drvdata(pdev, adapter);
4969
4970 if (adapter->flags & FW_OK) {
Dimitris Michailidis060e0c72010-08-02 13:19:21 +00004971 err = t4_port_init(adapter, func, func, 0);
Dimitris Michailidisb8ff05a2010-04-01 15:28:26 +00004972 if (err)
4973 goto out_free_dev;
4974 }
4975
4976 /*
4977 * Configure queues and allocate tables now, they can be needed as
4978 * soon as the first register_netdev completes.
4979 */
4980 cfg_queues(adapter);
4981
4982 adapter->l2t = t4_init_l2t();
4983 if (!adapter->l2t) {
4984 /* We tolerate a lack of L2T, giving up some functionality */
4985 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
4986 adapter->params.offload = 0;
4987 }
4988
4989 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
4990 dev_warn(&pdev->dev, "could not allocate TID table, "
4991 "continuing\n");
4992 adapter->params.offload = 0;
4993 }
4994
Dimitris Michailidisf7cabcd2010-07-11 12:01:15 +00004995 /* See what interrupts we'll be using */
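	/*
	 * Per the "msi" module parameter defined earlier in this file: a
	 * value above 1 tries MSI-X first, 1 tries only MSI, and 0 forces
	 * legacy INTx.
	 */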
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);

#endif

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

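		/*
		 * init_one() tolerates partial registration, so only ports
		 * that actually reached NETREG_REGISTERED get unregistered.
		 */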
		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	workq = create_singlethread_workqueue("cxgb4");
	if (!workq)
		return -ENOMEM;

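	/*
	 * This single-threaded workqueue serializes the driver's deferred
	 * work, e.g. the TID-release and doorbell tasks set up in init_one().
	 */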
	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warning("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
	flush_workqueue(workq);
	destroy_workqueue(workq);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);