blob: d4f25a4b8af7893d05882c96b89e56a4a8e71808 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Vasundhara Volamace40af2015-03-04 00:44:34 -050033/* num_vfs module param is obsolete.
34 * Use sysfs method to enable/disable VFs.
35 */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000037module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000038MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039
Sathya Perla11ac75e2011-12-13 00:58:50 +000040static ushort rx_frag_size = 2048;
41module_param(rx_frag_size, ushort, S_IRUGO);
42MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
Benoit Taine9baa3c32014-08-08 15:56:03 +020044static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070046 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070047 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
48 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000051 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000052 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070053 { 0 }
54};
55MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000056/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070057static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000058 "CEV",
59 "CTX",
60 "DBUF",
61 "ERX",
62 "Host",
63 "MPU",
64 "NDMA",
65 "PTC ",
66 "RDMA ",
67 "RXF ",
68 "RXIPS ",
69 "RXULP0 ",
70 "RXULP1 ",
71 "RXULP2 ",
72 "TIM ",
73 "TPOST ",
74 "TPRE ",
75 "TXIPS ",
76 "TXULP0 ",
77 "TXULP1 ",
78 "UC ",
79 "WDMA ",
80 "TXULP2 ",
81 "HOST1 ",
82 "P0_OB_LINK ",
83 "P1_OB_LINK ",
84 "HOST_GPIO ",
85 "MBOX ",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +053086 "ERX2 ",
87 "SPARE ",
88 "JTAG ",
89 "MPU_INTPEND "
Ajit Khaparde7c185272010-07-29 06:16:33 +000090};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
Ajit Khaparde7c185272010-07-29 06:16:33 +000092/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070093static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000094 "LPCMEMHOST",
95 "MGMT_MAC",
96 "PCS0ONLINE",
97 "MPU_IRAM",
98 "PCS1ONLINE",
99 "PCTL0",
100 "PCTL1",
101 "PMEM",
102 "RR",
103 "TXPB",
104 "RXPP",
105 "XAUI",
106 "TXP",
107 "ARM",
108 "IPC",
109 "HOST2",
110 "HOST3",
111 "HOST4",
112 "HOST5",
113 "HOST6",
114 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530115 "ECRC",
116 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700117 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530118 "PERIPH",
119 "LLTXULP",
120 "D2P",
121 "RCON",
122 "LDMA",
123 "LLTXP",
124 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000125 "Unknown"
126};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530160 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530193
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194 val |= qid & DB_RQ_RING_ID_MASK;
195 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000196
197 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199}
200
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
202 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203{
204 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530205
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000206 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000208
209 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000210 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211}
212
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530214 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215{
216 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530219 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000220
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000221 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000222 return;
223
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700224 if (arm)
225 val |= 1 << DB_EQ_REARM_SHIFT;
226 if (clear_int)
227 val |= 1 << DB_EQ_CLR_SHIFT;
228 val |= 1 << DB_EQ_EVNT_SHIFT;
229 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000230 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231}
232
Sathya Perla8788fdc2009-07-27 22:52:03 +0000233void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700234{
235 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530236
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000238 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
239 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000240
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000241 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000242 return;
243
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700244 if (arm)
245 val |= 1 << DB_CQ_REARM_SHIFT;
246 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000247 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248}
249
/* ndo_set_mac_address handler: program a user-supplied MAC address.
 * Adds the new MAC via a PMAC_ADD FW cmd, deletes the old PMAC entry if
 * a new one was created, then queries the FW for the currently active
 * MAC — only the FW query decides whether the change actually took
 * effect (a VF's cmd may fail/be ignored depending on privileges).
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM
 * when the FW did not activate the new MAC, or a FW cmd status.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		/* pmac_id[0] now refers to the newly added MAC */
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	/* FW confirms the new MAC is active: reflect it on the netdev */
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
310
Sathya Perlaca34fe32012-11-06 17:48:56 +0000311/* BE2 supports only v0 cmd */
312static void *hw_stats_from_cmd(struct be_adapter *adapter)
313{
314 if (BE2_chip(adapter)) {
315 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
316
317 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500318 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
320
321 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500322 } else {
323 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
324
325 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000326 }
327}
328
329/* BE2 supports only v0 cmd */
330static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
331{
332 if (BE2_chip(adapter)) {
333 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
334
335 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500336 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
338
339 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500340 } else {
341 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
342
343 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000344 }
345}
346
347static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000348{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000349 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
350 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
351 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000352 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 &rxf_stats->port[adapter->port_num];
354 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355
Sathya Perlaac124ff2011-07-25 19:10:14 +0000356 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000357 drvs->rx_pause_frames = port_stats->rx_pause_frames;
358 drvs->rx_crc_errors = port_stats->rx_crc_errors;
359 drvs->rx_control_frames = port_stats->rx_control_frames;
360 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
361 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
362 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
363 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
364 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
365 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
366 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
367 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
368 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
369 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
370 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000371 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000372 drvs->rx_dropped_header_too_small =
373 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000374 drvs->rx_address_filtered =
375 port_stats->rx_address_filtered +
376 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000377 drvs->rx_alignment_symbol_errors =
378 port_stats->rx_alignment_symbol_errors;
379
380 drvs->tx_pauseframes = port_stats->tx_pauseframes;
381 drvs->tx_controlframes = port_stats->tx_controlframes;
382
383 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000386 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000387 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000388 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000389 drvs->forwarded_packets = rxf_stats->forwarded_packets;
390 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000391 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
392 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000393 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
394}
395
/* Transcribe the v1 (BE3) GET_STATS response into adapter->drv_stats.
 * The response is byte-swapped in place first; per-port counters are
 * read from the slot matching adapter->port_num.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* convert the FW's little-endian words to CPU byte order in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filtered-drop counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
441
/* Transcribe the v2 GET_STATS response into adapter->drv_stats.
 * Same layout as v1 plus RoCE counters, which are copied only when the
 * adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* convert the FW's little-endian words to CPU byte order in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	/* RoCE counters exist only in the v2 layout */
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
495
/* Transcribe the Lancer per-port (pport) stats response into
 * adapter->drv_stats. Lancer uses its own stats layout; several 64-bit
 * counters are consumed via their low-word (_lo) halves here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	/* convert the FW's little-endian words to CPU byte order in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	/* NOTE(review): the same rx_fifo_overflow counter feeds both
	 * input-fifo and rxpp-fifo drop stats here
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address- and vlan-filtered drops are exposed as one sum */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000532
Sathya Perla09c1c682011-08-22 19:41:53 +0000533static void accumulate_16bit_val(u32 *acc, u16 val)
534{
535#define lo(x) (x & 0xFFFF)
536#define hi(x) (x & 0xFFFF0000)
537 bool wrapped = val < lo(*acc);
538 u32 newacc = hi(*acc) + val;
539
540 if (wrapped)
541 newacc += 65536;
542 ACCESS_ONCE(*acc) = newacc;
543}
544
Jingoo Han4188e7d2013-08-05 18:02:02 +0900545static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530546 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000547{
548 if (!BEx_chip(adapter))
549 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
550 else
551 /* below erx HW counter can actually wrap around after
552 * 65535. Driver accumulates a 32-bit value
553 */
554 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
555 (u16)erx_stat);
556}
557
/* Parse the FW stats response into adapter->drv_stats, dispatching on
 * chip family (Lancer vs BE2/BE3/later), and fold per-RX-queue ERX drop
 * counters into each rxo's stats (non-Lancer chips only).
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
583
/* .ndo_get_stats64 handler: aggregates the per-RX/TX-queue SW counters and
 * the HW error counters cached in adapter->drv_stats into @stats.
 * Per-queue 64-bit counters are read under the u64_stats seqcount so the
 * values are consistent even on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* retry the read if a writer updated the stats midway */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
651
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700653{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700654 struct net_device *netdev = adapter->netdev;
655
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000656 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000657 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700659 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530661 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000662 netif_carrier_on(netdev);
663 else
664 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200665
666 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700667}
668
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500669static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670{
Sathya Perla3c8def92011-06-12 20:01:58 +0000671 struct be_tx_stats *stats = tx_stats(txo);
672
Sathya Perlaab1594e2011-07-25 19:10:15 +0000673 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000674 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500675 stats->tx_bytes += skb->len;
676 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000677 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678}
679
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500680/* Returns number of WRBs needed for the skb */
681static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700682{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500683 /* +1 for the header wrb */
684 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700685}
686
687static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
688{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500689 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
690 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
691 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
692 wrb->rsvd0 = 0;
693}
694
695/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
696 * to avoid the swap and shift/mask operations in wrb_fill().
697 */
698static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
699{
700 wrb->frag_pa_hi = 0;
701 wrb->frag_pa_lo = 0;
702 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000703 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700704}
705
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530707 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000708{
709 u8 vlan_prio;
710 u16 vlan_tag;
711
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100712 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000713 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
714 /* If vlan priority provided by OS is NOT in available bmap */
715 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
716 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
717 adapter->recommended_prio;
718
719 return vlan_tag;
720}
721
Sathya Perlac9c47142014-03-27 10:46:19 +0530722/* Used only for IP tunnel packets */
723static u16 skb_inner_ip_proto(struct sk_buff *skb)
724{
725 return (inner_ip_hdr(skb)->version == 4) ?
726 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
727}
728
729static u16 skb_ip_proto(struct sk_buff *skb)
730{
731 return (ip_hdr(skb)->version == 4) ?
732 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
733}
734
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530735static inline bool be_is_txq_full(struct be_tx_obj *txo)
736{
737 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
738}
739
740static inline bool be_can_txq_wake(struct be_tx_obj *txo)
741{
742 return atomic_read(&txo->q.used) < txo->q.len / 2;
743}
744
745static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
746{
747 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
748}
749
/* Examine the skb offload state (GSO / csum / VLAN) and record the
 * corresponding TX feature flags and values in @wrb_params; these are
 * later encoded into the header WRB by wrb_fill_hdr().
 */
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		/* the extra LSO6 hint for IPv6 TSO is not set on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* for tunnelled pkts, checksum the inner headers */
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	/* CRC generation is always requested */
	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500781
/* Translate the abstract feature flags/values collected in @wrb_params
 * into the bit-fields of the TX header WRB consumed by the HW.
 * Endian conversion of the header is done later by the caller.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum-offload request bits */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* segmentation-offload request bits */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}
816
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000817static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530818 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000819{
820 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500821 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000822
Sathya Perla7101e112010-03-22 20:41:12 +0000823
Sathya Perlaf986afc2015-02-06 08:18:43 -0500824 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
825 (u64)le32_to_cpu(wrb->frag_pa_lo);
826 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000827 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500828 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000829 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500830 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000831 }
832}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700833
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530834/* Grab a WRB header for xmit */
835static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700836{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530837 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700838
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530839 queue_head_inc(&txo->q);
840 return head;
841}
842
/* Set up the WRB header for xmit: fill the header slot reserved earlier
 * at @head, record the skb for completion-time freeing, and account the
 * request's WRBs in the queue and txo counters.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* HW reads the header in little-endian */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* the header slot must not already hold a pending skb */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	/* WRBs not yet notified to HW via doorbell */
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700863
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530864/* Setup a WRB fragment (buffer descriptor) for xmit */
865static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
866 int len)
867{
868 struct be_eth_wrb *wrb;
869 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700870
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530871 wrb = queue_head_node(txq);
872 wrb_fill(wrb, busaddr, len);
873 queue_head_inc(txq);
874}
875
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind the producer index back to this packet's header WRB */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first frag can be a dma_map_single() mapping */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* leave the queue positioned at the (now unused) header slot */
	txq->head = head;
}
903
904/* Enqueue the given packet for transmit. This routine allocates WRBs for the
905 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
906 * of WRBs used up by the packet.
907 */
908static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
909 struct sk_buff *skb,
910 struct be_wrb_params *wrb_params)
911{
912 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
913 struct device *dev = &adapter->pdev->dev;
914 struct be_queue_info *txq = &txo->q;
915 bool map_single = false;
916 u16 head = txq->head;
917 dma_addr_t busaddr;
918 int len;
919
920 head = be_tx_get_wrb_hdr(txo);
921
922 if (skb->len > skb->data_len) {
923 len = skb_headlen(skb);
924
925 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
926 if (dma_mapping_error(dev, busaddr))
927 goto dma_err;
928 map_single = true;
929 be_tx_setup_wrb_frag(txo, busaddr, len);
930 copied += len;
931 }
932
933 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
934 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
935 len = skb_frag_size(frag);
936
937 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
938 if (dma_mapping_error(dev, busaddr))
939 goto dma_err;
940 be_tx_setup_wrb_frag(txo, busaddr, len);
941 copied += len;
942 }
943
944 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
945
946 be_tx_stats_update(txo, skb);
947 return wrb_cnt;
948
949dma_err:
950 adapter->drv_stats.dma_map_errors++;
951 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000952 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700953}
954
/* Non-zero once the FW's async event announcing QnQ mode has been
 * received (BE_FLAGS_QNQ_ASYNC_EVT_RCVD set in adapter->flags).
 */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
959
/* Insert the VLAN tag (and, in QnQ mode, possibly the outer tag too)
 * directly into the packet data as a workaround for HW tagging bugs.
 * May return a different skb (shared skbs are copied, tag insertion can
 * reallocate) or NULL on failure; callers must use the returned pointer.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* untagged pkts get the port VLAN id in QnQ mode */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag now lives in the payload, not in the skb metadata */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
1003
/* Returns true if @skb is an IPv6 pkt whose first header after the fixed
 * IPv6 header is an extension header with a (bogus) hdrlen of 0xff —
 * the packet shape that triggers the TX stall handled by the callers.
 */
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		/* anything other than TCP/UDP next is an extension header */
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}
1025
1026static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1027{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001028 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001029}
1030
Sathya Perla748b5392014-05-09 13:29:13 +05301031static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001032{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001033 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001034}
1035
/* Apply BEx/Lancer TX errata workarounds that modify the skb itself:
 * trim incorrectly padded short IPv4 pkts, and insert VLAN tags manually
 * where HW tagging would hit known HW bugs. Returns the (possibly new)
 * skb, or NULL if the pkt was dropped or a copy failed; the pkt is freed
 * on the drop path.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* drop the padding so the HW/FW sees only real payload */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1104
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301105static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1106 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301107 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301108{
1109 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1110 * less may cause a transmit stall on that port. So the work-around is
1111 * to pad short packets (<= 32 bytes) to a 36-byte length.
1112 */
1113 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001114 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301115 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301116 }
1117
1118 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301119 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301120 if (!skb)
1121 return NULL;
1122 }
1123
1124 return skb;
1125}
1126
/* Notify the HW (ring the TX doorbell) of all WRBs queued since the last
 * flush. The last request is made eventable, and on non-Lancer chips a
 * dummy WRB is appended when the pending count is odd.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* re-patch num_wrb in the last request's header so it
		 * accounts for the dummy WRB just added
		 */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1150
/* .ndo_start_xmit handler: applies HW workarounds, DMA-maps and enqueues
 * the skb onto the per-queue TX ring, and rings the doorbell unless the
 * stack has indicated more pkts are coming (xmit_more).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	/* defer the doorbell while the stack still has pkts queued */
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;	/* workaround path already freed the skb */

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		/* DMA mapping failed; enqueue was rolled back */
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* stop the queue before it can overflow on the next pkt */
	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1189
1190static int be_change_mtu(struct net_device *netdev, int new_mtu)
1191{
1192 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301193 struct device *dev = &adapter->pdev->dev;
1194
1195 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1196 dev_info(dev, "MTU must be between %d and %d bytes\n",
1197 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001198 return -EINVAL;
1199 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301200
1201 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301202 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001203 netdev->mtu = new_mtu;
1204 return 0;
1205}
1206
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001207static inline bool be_in_all_promisc(struct be_adapter *adapter)
1208{
1209 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1210 BE_IF_FLAGS_ALL_PROMISCUOUS;
1211}
1212
1213static int be_set_vlan_promisc(struct be_adapter *adapter)
1214{
1215 struct device *dev = &adapter->pdev->dev;
1216 int status;
1217
1218 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1219 return 0;
1220
1221 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1222 if (!status) {
1223 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1224 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1225 } else {
1226 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1227 }
1228 return status;
1229}
1230
1231static int be_clear_vlan_promisc(struct be_adapter *adapter)
1232{
1233 struct device *dev = &adapter->pdev->dev;
1234 int status;
1235
1236 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1237 if (!status) {
1238 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1239 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1240 }
1241 return status;
1242}
1243
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
/* Program the current adapter->vids bitmap into the HW VLAN filter,
 * falling back to VLAN promiscuous mode when the filter is over capacity
 * or the FW rejects the table for lack of resources; leaves promiscuous
 * mode again once filter programming succeeds.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	/* more VLANs than HW filter slots: must go promiscuous */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* filtering works again; drop out of promiscuous mode */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1278
/* .ndo_vlan_rx_add_vid handler: records @vid in the SW bitmap and
 * re-programs the HW VLAN filter; rolls the SW state back on failure so
 * it stays in sync with the HW.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	/* already configured; nothing to do */
	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		/* undo the SW bookkeeping done above */
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}
1302
Patrick McHardy80d5c362013-04-19 02:04:28 +00001303static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001304{
1305 struct be_adapter *adapter = netdev_priv(netdev);
1306
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001307 /* Packets with VID 0 are always received by Lancer by default */
1308 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301309 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001310
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301311 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301312 adapter->vlans_added--;
1313
1314 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001315}
1316
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001317static void be_clear_all_promisc(struct be_adapter *adapter)
Somnath kotur7ad09452014-03-03 14:24:43 +05301318{
Sathya Perlaac34b742015-02-06 08:18:40 -05001319 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001320 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1321}
1322
1323static void be_set_all_promisc(struct be_adapter *adapter)
1324{
1325 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1326 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1327}
1328
1329static void be_set_mc_promisc(struct be_adapter *adapter)
1330{
1331 int status;
1332
1333 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1334 return;
1335
1336 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1337 if (!status)
1338 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1339}
1340
1341static void be_set_mc_list(struct be_adapter *adapter)
1342{
1343 int status;
1344
1345 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1346 if (!status)
1347 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1348 else
1349 be_set_mc_promisc(adapter);
1350}
1351
/* Re-program the netdev's unicast MAC list into the interface.
 * pmac_id[0] always holds the primary MAC; additional UC MACs occupy
 * slots 1..uc_macs. The old list is deleted from HW first, then the
 * current netdev UC list is re-added entry by entry.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Delete all previously programmed UC MACs; uc_macs drops to 0 */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	/* More UC addresses than HW slots: resort to promiscuous mode */
	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Add each UC address; pre-increment so slot 0 stays the
	 * primary MAC and uc_macs ends up as the count of added MACs
	 */
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1372
1373static void be_clear_uc_list(struct be_adapter *adapter)
1374{
1375 int i;
1376
1377 for (i = 1; i < (adapter->uc_macs + 1); i++)
1378 be_cmd_pmac_del(adapter, adapter->if_handle,
1379 adapter->pmac_id[i], 0);
1380 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301381}
1382
/* ndo_set_rx_mode handler: sync the HW RX filters (promisc / multicast /
 * unicast lists) with the netdev flags and address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Full promiscuous overrides everything else */
	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		/* Re-program the VLAN filter table that promisc bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Re-program UC MACs only when the netdev list size changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1411
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * Returns 0 on success, -EPERM if SR-IOV is off, -EINVAL on a bad MAC
 * or VF index, or a translated FW error via be_cmd_status().
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BEx has no set_mac cmd: delete the old pmac entry,
		 * then add the new MAC on the VF's interface
		 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		/* Lancer/Skyhawk support changing the MAC in one cmd */
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the now-active MAC for the early-exit check above */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1451
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001452static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301453 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001454{
1455 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001456 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001457
Sathya Perla11ac75e2011-12-13 00:58:50 +00001458 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001459 return -EPERM;
1460
Sathya Perla11ac75e2011-12-13 00:58:50 +00001461 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001462 return -EINVAL;
1463
1464 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001465 vi->max_tx_rate = vf_cfg->tx_rate;
1466 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001467 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1468 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001469 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301470 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001471 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001472
1473 return 0;
1474}
1475
/* Enable Transparent VLAN Tagging (TVT) with tag @vlan on VF @vf:
 * program the tag in the HSW config, wipe any VLAN filters the VF had
 * programmed, and revoke the VF's FILTMGMT privilege so it cannot add
 * new filters while TVT is active.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	/* NOTE: failures of the filter-clear / privilege steps are not
	 * propagated; TVT itself is already enabled at this point
	 */
	return 0;
}
1504
/* Disable Transparent VLAN Tagging on VF @vf and restore the VF's
 * FILTMGMT privilege so it may program its own VLAN filters again.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	/* The tag already inserted in the VM's stack is not removed by
	 * the reset above; the VM must bounce its interface
	 */
	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1531
Sathya Perla748b5392014-05-09 13:29:13 +05301532static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001533{
1534 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001535 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001536 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001537
Sathya Perla11ac75e2011-12-13 00:58:50 +00001538 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001539 return -EPERM;
1540
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001541 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001542 return -EINVAL;
1543
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001544 if (vlan || qos) {
1545 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001546 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001547 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001548 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001549 }
1550
Kalesh APabccf232014-07-17 16:20:24 +05301551 if (status) {
1552 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001553 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1554 status);
Kalesh APabccf232014-07-17 16:20:24 +05301555 return be_cmd_status(status);
1556 }
1557
1558 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301559 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001560}
1561
/* ndo_set_vf_rate handler: cap the TX rate of VF @vf at @max_tx_rate
 * Mbps (0 clears the cap). @min_tx_rate is not supported and must be 0.
 * The requested rate is validated against the current link speed; on
 * Skyhawk it must additionally be a multiple of 1% of link speed.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 == no limit: skip all link-speed validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	/* link_speed is 0 here in the "clear limit" path */
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the applied rate for be_get_vf_config() */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301623
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301624static int be_set_vf_link_state(struct net_device *netdev, int vf,
1625 int link_state)
1626{
1627 struct be_adapter *adapter = netdev_priv(netdev);
1628 int status;
1629
1630 if (!sriov_enabled(adapter))
1631 return -EPERM;
1632
1633 if (vf >= adapter->num_vfs)
1634 return -EINVAL;
1635
1636 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301637 if (status) {
1638 dev_err(&adapter->pdev->dev,
1639 "Link state change on VF %d failed: %#x\n", vf, status);
1640 return be_cmd_status(status);
1641 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301642
Kalesh APabccf232014-07-17 16:20:24 +05301643 adapter->vf_cfg[vf].plink_tracking = link_state;
1644
1645 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301646}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001647
Kalesh APe7bcbd72015-05-06 05:30:32 -04001648static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1649{
1650 struct be_adapter *adapter = netdev_priv(netdev);
1651 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1652 u8 spoofchk;
1653 int status;
1654
1655 if (!sriov_enabled(adapter))
1656 return -EPERM;
1657
1658 if (vf >= adapter->num_vfs)
1659 return -EINVAL;
1660
1661 if (BEx_chip(adapter))
1662 return -EOPNOTSUPP;
1663
1664 if (enable == vf_cfg->spoofchk)
1665 return 0;
1666
1667 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1668
1669 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1670 0, spoofchk);
1671 if (status) {
1672 dev_err(&adapter->pdev->dev,
1673 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1674 return be_cmd_status(status);
1675 }
1676
1677 vf_cfg->spoofchk = enable;
1678 return 0;
1679}
1680
Sathya Perla2632baf2013-10-01 16:00:00 +05301681static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1682 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001683{
Sathya Perla2632baf2013-10-01 16:00:00 +05301684 aic->rx_pkts_prev = rx_pkts;
1685 aic->tx_reqs_prev = tx_pkts;
1686 aic->jiffies = now;
1687}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001688
/* Adaptive interrupt coalescing: recompute the event-queue delay (EQD)
 * of every EQ from its recent RX+TX packet rate and push the changed
 * values to the FW in one batched command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC off: use the user-set (ethtool) static EQD */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read rx_pkts under the u64 stats seqcount */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		/* Read tx_reqs under the u64 stats seqcount */
		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* pkts/sec over the sampling window, RX + TX combined */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		/* Clamp: 0 below a small threshold, else [min_eqd, max_eqd] */
		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a FW update only when the EQD actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1754
Sathya Perla3abcded2010-10-03 22:12:27 -07001755static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301756 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001757{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001758 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001759
Sathya Perlaab1594e2011-07-25 19:10:15 +00001760 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001761 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001762 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001763 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001764 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001765 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001766 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001767 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001768 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001769}
1770
Sathya Perla2e588f82011-03-11 02:49:26 +00001771static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001772{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001773 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301774 * Also ignore ipcksm for ipv6 pkts
1775 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001776 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301777 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001778}
1779
/* Pop the RX frag at the queue tail and make its data CPU-visible.
 * Frags of one page share a single DMA mapping: only the page's last
 * frag unmaps it; earlier frags just sync their region for the CPU.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag of the page: tear down the whole mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still shared: sync only this frag's region */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* Consume the queue entry */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1805
1806/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001807static void be_rx_compl_discard(struct be_rx_obj *rxo,
1808 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001809{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001810 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001811 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001812
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001813 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301814 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001815 put_page(page_info->page);
1816 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001817 }
1818}
1819
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first (up to) BE_HDR_LEN bytes are copied into
 * the skb's linear area and the rest is attached as page frags, with
 * frags from the same physical page coalesced into one slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Whole frame fits in the linear area; frag page not kept */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area and
		 * leave the payload in the page as frag 0
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1894
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * build a normal skb from the completion's frags, fill in checksum/
 * hash/VLAN metadata and hand it to the stack.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb: still consume and free the completion's frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only if the netdev has RXCSUM enabled */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled frames the csum applies to the inner headers */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1930
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	/* Borrow the skb that napi keeps around for GRO frag assembly;
	 * if none is available, drop the completion's buffers.
	 */
	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Walk the num_rcvd posted RX fragments and attach them to the skb
	 * as page frags; j tracks the current skb frag slot (starts at -1
	 * so the first iteration allocates slot 0).
	 */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as the previous frag: the skb already
			 * holds a reference via slot j, drop the extra one.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	/* Finalize skb metadata: lengths, checksum status, RX queue/hash */
	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* csum_level conveys whether the validated checksum belongs to an
	 * encapsulated (tunneled) packet
	 */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1988
/* Decode a v1-format RX completion descriptor into the driver's
 * generic be_rx_compl_info representation.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* VLAN-related fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002011
/* Decode a v0-format (legacy) RX completion descriptor into the driver's
 * generic be_rx_compl_info representation. Unlike v1, v0 carries an
 * ip_frag flag instead of a tunneled flag.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* VLAN-related fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
2033
/* Fetch and parse the next valid RX completion from rxo's CQ.
 * Returns NULL when no valid entry is present. On success, returns a
 * pointer to rxo->rxcp (a single per-rxo scratch struct, overwritten on
 * every call) and advances the CQ tail; the HW entry's valid bit is
 * cleared so it is never consumed twice.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: don't load the rest of the DMA'd entry until the
	 * valid bit has been observed set.
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not reliably validated for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* Lancer already reports the tag in host byte order */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless the VID was
		 * explicitly configured on the interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2078
Eric Dumazet1829b082011-03-01 05:48:12 +00002079static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002080{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002081 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002082
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002083 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002084 gfp |= __GFP_COMP;
2085 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002086}
2087
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Post until frags_needed buffers are up, or until we hit a slot
	 * whose page is still owned by HW (ring full), or until page
	 * allocation/DMA-mapping fails.
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh "big page" and DMA-map it whole;
			 * individual frags are carved out of it below.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next frag out of the current big page;
			 * each frag holds its own page reference.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			/* Last frag of this page: remember the page-level DMA
			 * address so the whole mapping can be undone later.
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the RXQ doorbell in chunks of at most
		 * MAX_NUM_POST_ERX_DB buffers per notification.
		 */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2170
/* Fetch the next valid TX completion from txo's CQ.
 * Returns NULL when no valid entry is present. On success, returns a
 * pointer to txo->txcp (per-txo scratch, overwritten each call), clears
 * the HW entry's valid bit and advances the CQ tail.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Reset the valid bit so this entry is not consumed twice */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2191
/* Unmap and free the wrbs (and their skbs) of a TXQ up to and including
 * last_index. sent_skb_list has an skb stored at the position of each
 * request's header wrb; the remaining wrbs of a request carry NULL.
 * Returns the number of wrbs consumed so the caller can adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			/* The next wrb holds the skb's linear (header) data;
			 * unmap it only if the skb actually has headlen.
			 */
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the last (possibly only) request processed */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2225
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002226/* Return the number of events in the event queue */
2227static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002228{
2229 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002230 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002231
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002232 do {
2233 eqe = queue_tail_node(&eqo->q);
2234 if (eqe->evt == 0)
2235 break;
2236
2237 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002238 eqe->evt = 0;
2239 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002240 queue_tail_inc(&eqo->q);
2241 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002242
2243 return num;
2244}
2245
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002246/* Leaves the EQ is disarmed state */
2247static void be_eq_clean(struct be_eq_obj *eqo)
2248{
2249 int num = events_get(eqo);
2250
2251 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2252}
2253
/* Drain an RX object's completion queue and release all posted but
 * unused RX buffers, leaving both the CQ unarmed and the RXQ empty
 * with reset head/tail indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms of silence or on HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* num_rcvd == 0 identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2303
/* Reap all outstanding TX completions across all TXQs, then forcibly
 * clean up any wrbs that were enqueued but never notified to HW,
 * rewinding the TXQ indices for those.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			/* Drain every available completion on this TXQ */
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart the 10ms
				 * silence timer
				 */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2368
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002369static void be_evt_queues_destroy(struct be_adapter *adapter)
2370{
2371 struct be_eq_obj *eqo;
2372 int i;
2373
2374 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002375 if (eqo->q.created) {
2376 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002377 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302378 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302379 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002380 }
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04002381 free_cpumask_var(eqo->affinity_mask);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002382 be_queue_free(adapter, &eqo->q);
2383 }
2384}
2385
/* Create the event queues: one per interrupt vector (capped by the
 * configured queue count). For each EQ this sets up a CPU affinity
 * mask, a napi context, adaptive-interrupt (aic) defaults, the host
 * queue memory, and finally the HW EQ itself.
 * Returns 0 on success or a negative errno.
 * NOTE(review): error paths return without unwinding what was already
 * set up for earlier EQs — presumably the caller is expected to invoke
 * be_evt_queues_destroy() on failure; verify against the call sites.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Spread EQs over CPUs local to the device's NUMA node */
		cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
					    eqo->affinity_mask);

		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2423
Sathya Perla5fb379e2009-06-18 00:02:59 +00002424static void be_mcc_queues_destroy(struct be_adapter *adapter)
2425{
2426 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002427
Sathya Perla8788fdc2009-07-27 22:52:03 +00002428 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002429 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002430 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002431 be_queue_free(adapter, q);
2432
Sathya Perla8788fdc2009-07-27 22:52:03 +00002433 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002434 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002435 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002436 be_queue_free(adapter, q);
2437}
2438
2439/* Must be called only after TX qs are created as MCC shares TX EQ */
2440static int be_mcc_queues_create(struct be_adapter *adapter)
2441{
2442 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002443
Sathya Perla8788fdc2009-07-27 22:52:03 +00002444 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002445 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302446 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002447 goto err;
2448
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002449 /* Use the default EQ for MCC completions */
2450 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002451 goto mcc_cq_free;
2452
Sathya Perla8788fdc2009-07-27 22:52:03 +00002453 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002454 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2455 goto mcc_cq_destroy;
2456
Sathya Perla8788fdc2009-07-27 22:52:03 +00002457 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002458 goto mcc_q_free;
2459
2460 return 0;
2461
2462mcc_q_free:
2463 be_queue_free(adapter, q);
2464mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002465 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002466mcc_cq_free:
2467 be_queue_free(adapter, cq);
2468err:
2469 return -1;
2470}
2471
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002472static void be_tx_queues_destroy(struct be_adapter *adapter)
2473{
2474 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002475 struct be_tx_obj *txo;
2476 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002477
Sathya Perla3c8def92011-06-12 20:01:58 +00002478 for_all_tx_queues(adapter, txo, i) {
2479 q = &txo->q;
2480 if (q->created)
2481 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2482 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002483
Sathya Perla3c8def92011-06-12 20:01:58 +00002484 q = &txo->cq;
2485 if (q->created)
2486 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2487 be_queue_free(adapter, q);
2488 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002489}
2490
/* Create the TX queues: for each TX object allocate and create a
 * completion queue bound to an EQ, then allocate and create the WRB
 * queue, and steer XPS to the EQ's affinity mask.
 * Returns 0 on success or a negative status from the first failing step
 * (no local unwinding on error).
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Transmit-side CPU steering follows the EQ's affinity */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2535
2536static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002537{
2538 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002539 struct be_rx_obj *rxo;
2540 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002541
Sathya Perla3abcded2010-10-03 22:12:27 -07002542 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002543 q = &rxo->cq;
2544 if (q->created)
2545 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2546 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002547 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002548}
2549
/* Decide how many RX queues to use (RSS rings plus an optional default
 * RXQ, at least one overall) and create a completion queue for each,
 * distributing them over the available EQs.
 * Returns 0 on success or a negative status from the first failing step.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	/* big_page_size is the smallest page-multiple that fits rx_frag_size */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Spread RX CQs round-robin over the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2591
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002592static irqreturn_t be_intx(int irq, void *dev)
2593{
Sathya Perlae49cc342012-11-27 19:50:02 +00002594 struct be_eq_obj *eqo = dev;
2595 struct be_adapter *adapter = eqo->adapter;
2596 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002597
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002598 /* IRQ is not expected when NAPI is scheduled as the EQ
2599 * will not be armed.
2600 * But, this can happen on Lancer INTx where it takes
2601 * a while to de-assert INTx or in BE2 where occasionaly
2602 * an interrupt may be raised even when EQ is unarmed.
2603 * If NAPI is already scheduled, then counting & notifying
2604 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002605 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002606 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002607 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002608 __napi_schedule(&eqo->napi);
2609 if (num_evts)
2610 eqo->spurious_intr = 0;
2611 }
Sathya Perlae49cc342012-11-27 19:50:02 +00002612 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002613
2614 /* Return IRQ_HANDLED only for the the first spurious intr
2615 * after a valid intr to stop the kernel from branding
2616 * this irq as a bad one!
2617 */
2618 if (num_evts || eqo->spurious_intr++ == 0)
2619 return IRQ_HANDLED;
2620 else
2621 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002622}
2623
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002624static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002625{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002626 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002627
Sathya Perla0b545a62012-11-23 00:27:18 +00002628 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2629 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002630 return IRQ_HANDLED;
2631}
2632
Sathya Perla2e588f82011-03-11 02:49:26 +00002633static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002634{
Somnath Koture38b1702013-05-29 22:55:56 +00002635 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002636}
2637
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002638static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla748b5392014-05-09 13:29:13 +05302639 int budget, int polling)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002640{
Sathya Perla3abcded2010-10-03 22:12:27 -07002641 struct be_adapter *adapter = rxo->adapter;
2642 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002643 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002644 u32 work_done;
Ajit Khapardec30d7262014-09-12 17:39:16 +05302645 u32 frags_consumed = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002646
2647 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002648 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002649 if (!rxcp)
2650 break;
2651
Sathya Perla12004ae2011-08-02 19:57:46 +00002652 /* Is it a flush compl that has no data */
2653 if (unlikely(rxcp->num_rcvd == 0))
2654 goto loop_continue;
2655
2656 /* Discard compl with partial DMA Lancer B0 */
2657 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002658 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002659 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002660 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002661
Sathya Perla12004ae2011-08-02 19:57:46 +00002662 /* On BE drop pkts that arrive due to imperfect filtering in
2663 * promiscuous mode on some skews
2664 */
2665 if (unlikely(rxcp->port != adapter->port_num &&
Sathya Perla748b5392014-05-09 13:29:13 +05302666 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002667 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002668 goto loop_continue;
2669 }
2670
Sathya Perla6384a4d2013-10-25 10:40:16 +05302671 /* Don't do gro when we're busy_polling */
2672 if (do_gro(rxcp) && polling != BUSY_POLLING)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002673 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002674 else
Sathya Perla6384a4d2013-10-25 10:40:16 +05302675 be_rx_compl_process(rxo, napi, rxcp);
2676
Sathya Perla12004ae2011-08-02 19:57:46 +00002677loop_continue:
Ajit Khapardec30d7262014-09-12 17:39:16 +05302678 frags_consumed += rxcp->num_rcvd;
Sathya Perla2e588f82011-03-11 02:49:26 +00002679 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002680 }
2681
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002682 if (work_done) {
2683 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002684
Sathya Perla6384a4d2013-10-25 10:40:16 +05302685 /* When an rx-obj gets into post_starved state, just
2686 * let be_worker do the posting.
2687 */
2688 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2689 !rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05302690 be_post_rx_frags(rxo, GFP_ATOMIC,
2691 max_t(u32, MAX_RX_POST,
2692 frags_consumed));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002693 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002694
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002695 return work_done;
2696}
2697
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302698static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302699{
2700 switch (status) {
2701 case BE_TX_COMP_HDR_PARSE_ERR:
2702 tx_stats(txo)->tx_hdr_parse_err++;
2703 break;
2704 case BE_TX_COMP_NDMA_ERR:
2705 tx_stats(txo)->tx_dma_err++;
2706 break;
2707 case BE_TX_COMP_ACL_ERR:
2708 tx_stats(txo)->tx_spoof_check_err++;
2709 break;
2710 }
2711}
2712
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302713static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302714{
2715 switch (status) {
2716 case LANCER_TX_COMP_LSO_ERR:
2717 tx_stats(txo)->tx_tso_err++;
2718 break;
2719 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2720 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2721 tx_stats(txo)->tx_spoof_check_err++;
2722 break;
2723 case LANCER_TX_COMP_QINQ_ERR:
2724 tx_stats(txo)->tx_qinq_err++;
2725 break;
2726 case LANCER_TX_COMP_PARITY_ERR:
2727 tx_stats(txo)->tx_internal_parity_err++;
2728 break;
2729 case LANCER_TX_COMP_DMA_ERR:
2730 tx_stats(txo)->tx_dma_err++;
2731 break;
2732 }
2733}
2734
Sathya Perlac8f64612014-09-02 09:56:55 +05302735static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2736 int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002737{
Sathya Perlac8f64612014-09-02 09:56:55 +05302738 int num_wrbs = 0, work_done = 0;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302739 struct be_tx_compl_info *txcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002740
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302741 while ((txcp = be_tx_compl_get(txo))) {
2742 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
Sathya Perlac8f64612014-09-02 09:56:55 +05302743 work_done++;
2744
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302745 if (txcp->status) {
Kalesh AP512bb8a2014-09-02 09:56:49 +05302746 if (lancer_chip(adapter))
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302747 lancer_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05302748 else
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302749 be_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05302750 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002751 }
2752
2753 if (work_done) {
2754 be_cq_notify(adapter, txo->cq.id, true, work_done);
2755 atomic_sub(num_wrbs, &txo->q.used);
2756
2757 /* As Tx wrbs have been freed up, wake up netdev queue
2758 * if it was stopped due to lack of tx wrbs. */
2759 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05302760 be_can_txq_wake(txo)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002761 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002762 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002763
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002764 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2765 tx_stats(txo)->tx_compl += work_done;
2766 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2767 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002768}
Sathya Perla3c8def92011-06-12 20:01:58 +00002769
Sathya Perlaf7062ee2015-02-06 08:18:35 -05002770#ifdef CONFIG_NET_RX_BUSY_POLL
2771static inline bool be_lock_napi(struct be_eq_obj *eqo)
2772{
2773 bool status = true;
2774
2775 spin_lock(&eqo->lock); /* BH is already disabled */
2776 if (eqo->state & BE_EQ_LOCKED) {
2777 WARN_ON(eqo->state & BE_EQ_NAPI);
2778 eqo->state |= BE_EQ_NAPI_YIELD;
2779 status = false;
2780 } else {
2781 eqo->state = BE_EQ_NAPI;
2782 }
2783 spin_unlock(&eqo->lock);
2784 return status;
2785}
2786
2787static inline void be_unlock_napi(struct be_eq_obj *eqo)
2788{
2789 spin_lock(&eqo->lock); /* BH is already disabled */
2790
2791 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2792 eqo->state = BE_EQ_IDLE;
2793
2794 spin_unlock(&eqo->lock);
2795}
2796
2797static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2798{
2799 bool status = true;
2800
2801 spin_lock_bh(&eqo->lock);
2802 if (eqo->state & BE_EQ_LOCKED) {
2803 eqo->state |= BE_EQ_POLL_YIELD;
2804 status = false;
2805 } else {
2806 eqo->state |= BE_EQ_POLL;
2807 }
2808 spin_unlock_bh(&eqo->lock);
2809 return status;
2810}
2811
2812static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2813{
2814 spin_lock_bh(&eqo->lock);
2815
2816 WARN_ON(eqo->state & (BE_EQ_NAPI));
2817 eqo->state = BE_EQ_IDLE;
2818
2819 spin_unlock_bh(&eqo->lock);
2820}
2821
2822static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2823{
2824 spin_lock_init(&eqo->lock);
2825 eqo->state = BE_EQ_IDLE;
2826}
2827
2828static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2829{
2830 local_bh_disable();
2831
2832 /* It's enough to just acquire napi lock on the eqo to stop
2833 * be_busy_poll() from processing any queueus.
2834 */
2835 while (!be_lock_napi(eqo))
2836 mdelay(1);
2837
2838 local_bh_enable();
2839}
2840
2841#else /* CONFIG_NET_RX_BUSY_POLL */
2842
2843static inline bool be_lock_napi(struct be_eq_obj *eqo)
2844{
2845 return true;
2846}
2847
2848static inline void be_unlock_napi(struct be_eq_obj *eqo)
2849{
2850}
2851
2852static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2853{
2854 return false;
2855}
2856
2857static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2858{
2859}
2860
2861static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2862{
2863}
2864
2865static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2866{
2867}
2868#endif /* CONFIG_NET_RX_BUSY_POLL */
2869
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302870int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002871{
2872 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2873 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002874 int max_work = 0, work, i, num_evts;
Sathya Perla6384a4d2013-10-25 10:40:16 +05302875 struct be_rx_obj *rxo;
Sathya Perlaa4906ea2014-09-02 09:56:56 +05302876 struct be_tx_obj *txo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002877
Sathya Perla0b545a62012-11-23 00:27:18 +00002878 num_evts = events_get(eqo);
2879
Sathya Perlaa4906ea2014-09-02 09:56:56 +05302880 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2881 be_process_tx(adapter, txo, i);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002882
Sathya Perla6384a4d2013-10-25 10:40:16 +05302883 if (be_lock_napi(eqo)) {
2884 /* This loop will iterate twice for EQ0 in which
2885 * completions of the last RXQ (default one) are also processed
2886 * For other EQs the loop iterates only once
2887 */
2888 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2889 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2890 max_work = max(work, max_work);
2891 }
2892 be_unlock_napi(eqo);
2893 } else {
2894 max_work = budget;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002895 }
2896
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002897 if (is_mcc_eqo(eqo))
2898 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002899
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002900 if (max_work < budget) {
2901 napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002902 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002903 } else {
2904 /* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002905 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002906 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002907 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002908}
2909
Sathya Perla6384a4d2013-10-25 10:40:16 +05302910#ifdef CONFIG_NET_RX_BUSY_POLL
2911static int be_busy_poll(struct napi_struct *napi)
2912{
2913 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2914 struct be_adapter *adapter = eqo->adapter;
2915 struct be_rx_obj *rxo;
2916 int i, work = 0;
2917
2918 if (!be_lock_busy_poll(eqo))
2919 return LL_FLUSH_BUSY;
2920
2921 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2922 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2923 if (work)
2924 break;
2925 }
2926
2927 be_unlock_busy_poll(eqo);
2928 return work;
2929}
2930#endif
2931
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002932void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002933{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002934 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2935 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002936 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302937 bool error_detected = false;
2938 struct device *dev = &adapter->pdev->dev;
2939 struct net_device *netdev = adapter->netdev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002940
Sathya Perlad23e9462012-12-17 19:38:51 +00002941 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002942 return;
2943
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002944 if (lancer_chip(adapter)) {
2945 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2946 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2947 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05302948 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002949 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05302950 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302951 adapter->hw_error = true;
Kalesh APd0e1b312015-02-23 04:20:12 -05002952 error_detected = true;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302953 /* Do not log error messages if its a FW reset */
2954 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2955 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2956 dev_info(dev, "Firmware update in progress\n");
2957 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302958 dev_err(dev, "Error detected in the card\n");
2959 dev_err(dev, "ERR: sliport status 0x%x\n",
2960 sliport_status);
2961 dev_err(dev, "ERR: sliport error1 0x%x\n",
2962 sliport_err1);
2963 dev_err(dev, "ERR: sliport error2 0x%x\n",
2964 sliport_err2);
2965 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002966 }
2967 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04002968 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
2969 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
2970 ue_lo_mask = ioread32(adapter->pcicfg +
2971 PCICFG_UE_STATUS_LOW_MASK);
2972 ue_hi_mask = ioread32(adapter->pcicfg +
2973 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002974
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002975 ue_lo = (ue_lo & ~ue_lo_mask);
2976 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002977
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302978 /* On certain platforms BE hardware can indicate spurious UEs.
2979 * Allow HW to stop working completely in case of a real UE.
2980 * Hence not setting the hw_error for UE detection.
2981 */
2982
2983 if (ue_lo || ue_hi) {
2984 error_detected = true;
2985 dev_err(dev,
2986 "Unrecoverable Error detected in the adapter");
2987 dev_err(dev, "Please reboot server to recover");
2988 if (skyhawk_chip(adapter))
2989 adapter->hw_error = true;
2990 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2991 if (ue_lo & 1)
2992 dev_err(dev, "UE: %s bit set\n",
2993 ue_status_low_desc[i]);
2994 }
2995 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2996 if (ue_hi & 1)
2997 dev_err(dev, "UE: %s bit set\n",
2998 ue_status_hi_desc[i]);
2999 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05303000 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003001 }
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303002 if (error_detected)
3003 netif_carrier_off(netdev);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003004}
3005
Sathya Perla8d56ff12009-11-22 22:02:26 +00003006static void be_msix_disable(struct be_adapter *adapter)
3007{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003008 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003009 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003010 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303011 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003012 }
3013}
3014
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003015static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003016{
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003017 int i, num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00003018 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003019
Sathya Perla92bf14a2013-08-27 16:57:32 +05303020 /* If RoCE is supported, program the max number of NIC vectors that
3021 * may be configured via set-channels, along with vectors needed for
3022 * RoCe. Else, just program the number we'll use initially.
3023 */
3024 if (be_roce_supported(adapter))
3025 num_vec = min_t(int, 2 * be_max_eqs(adapter),
3026 2 * num_online_cpus());
3027 else
3028 num_vec = adapter->cfg_num_qs;
Sathya Perla3abcded2010-10-03 22:12:27 -07003029
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003030 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003031 adapter->msix_entries[i].entry = i;
3032
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003033 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3034 MIN_MSIX_VECTORS, num_vec);
3035 if (num_vec < 0)
3036 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00003037
Sathya Perla92bf14a2013-08-27 16:57:32 +05303038 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3039 adapter->num_msix_roce_vec = num_vec / 2;
3040 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3041 adapter->num_msix_roce_vec);
3042 }
3043
3044 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3045
3046 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3047 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003048 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003049
3050fail:
3051 dev_warn(dev, "MSIx enable failed\n");
3052
3053 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
3054 if (!be_physfn(adapter))
3055 return num_vec;
3056 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003057}
3058
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003059static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303060 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003061{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303062 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003063}
3064
/* Request one MSI-x IRQ per event queue and set its CPU affinity hint.
 * On failure, unwinds the IRQs already requested and disables MSI-x.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* Free the IRQs requested so far, walking backwards from eq i-1 */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3090
3091static int be_irq_register(struct be_adapter *adapter)
3092{
3093 struct net_device *netdev = adapter->netdev;
3094 int status;
3095
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003096 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003097 status = be_msix_register(adapter);
3098 if (status == 0)
3099 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003100 /* INTx is not supported for VF */
3101 if (!be_physfn(adapter))
3102 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003103 }
3104
Sathya Perlae49cc342012-11-27 19:50:02 +00003105 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003106 netdev->irq = adapter->pdev->irq;
3107 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00003108 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003109 if (status) {
3110 dev_err(&adapter->pdev->dev,
3111 "INTx request IRQ failed - err %d\n", status);
3112 return status;
3113 }
3114done:
3115 adapter->isr_registered = true;
3116 return 0;
3117}
3118
3119static void be_irq_unregister(struct be_adapter *adapter)
3120{
3121 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003122 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003123 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003124
3125 if (!adapter->isr_registered)
3126 return;
3127
3128 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003129 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003130 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003131 goto done;
3132 }
3133
3134 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003135 for_all_evt_queues(adapter, eqo, i) {
3136 vec = be_msix_vec_get(adapter, eqo);
3137 irq_set_affinity_hint(vec, NULL);
3138 free_irq(vec, eqo);
3139 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003140
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003141done:
3142 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003143}
3144
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003145static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003146{
3147 struct be_queue_info *q;
3148 struct be_rx_obj *rxo;
3149 int i;
3150
3151 for_all_rx_queues(adapter, rxo, i) {
3152 q = &rxo->q;
3153 if (q->created) {
3154 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003155 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003156 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003157 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003158 }
3159}
3160
Sathya Perla889cd4b2010-05-30 23:33:45 +00003161static int be_close(struct net_device *netdev)
3162{
3163 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003164 struct be_eq_obj *eqo;
3165 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00003166
Kalesh APe1ad8e32014-04-14 16:12:41 +05303167 /* This protection is needed as be_close() may be called even when the
3168 * adapter is in cleared state (after eeh perm failure)
3169 */
3170 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3171 return 0;
3172
Parav Pandit045508a2012-03-26 14:27:13 +00003173 be_roce_dev_close(adapter);
3174
Ivan Veceradff345c52013-11-27 08:59:32 +01003175 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3176 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00003177 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303178 be_disable_busy_poll(eqo);
3179 }
David S. Miller71237b62013-11-28 18:53:36 -05003180 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00003181 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003182
3183 be_async_mcc_disable(adapter);
3184
3185 /* Wait for all pending tx completions to arrive so that
3186 * all tx skbs are freed.
3187 */
Sathya Perlafba87552013-05-08 02:05:50 +00003188 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05303189 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003190
3191 be_rx_qs_destroy(adapter);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003192 be_clear_uc_list(adapter);
Ajit Khaparded11a3472013-11-18 10:44:37 -06003193
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003194 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003195 if (msix_enabled(adapter))
3196 synchronize_irq(be_msix_vec_get(adapter, eqo));
3197 else
3198 synchronize_irq(netdev->irq);
3199 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00003200 }
3201
Sathya Perla889cd4b2010-05-30 23:33:45 +00003202 be_irq_unregister(adapter);
3203
Sathya Perla482c9e72011-06-29 23:33:17 +00003204 return 0;
3205}
3206
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003207static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003208{
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003209 struct rss_info *rss = &adapter->rss_info;
3210 u8 rss_key[RSS_HASH_KEY_LEN];
Sathya Perla482c9e72011-06-29 23:33:17 +00003211 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003212 int rc, i, j;
Sathya Perla482c9e72011-06-29 23:33:17 +00003213
3214 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003215 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3216 sizeof(struct be_eth_rx_d));
3217 if (rc)
3218 return rc;
3219 }
3220
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003221 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3222 rxo = default_rxo(adapter);
3223 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3224 rx_frag_size, adapter->if_handle,
3225 false, &rxo->rss_id);
3226 if (rc)
3227 return rc;
3228 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003229
3230 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00003231 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003232 rx_frag_size, adapter->if_handle,
3233 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00003234 if (rc)
3235 return rc;
3236 }
3237
3238 if (be_multi_rxq(adapter)) {
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003239 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003240 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303241 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003242 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05303243 rss->rsstable[j + i] = rxo->rss_id;
3244 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003245 }
3246 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05303247 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3248 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00003249
3250 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05303251 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3252 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303253 } else {
3254 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05303255 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303256 }
Suresh Reddy594ad542013-04-25 23:03:20 +00003257
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003258 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
Sathya Perla748b5392014-05-09 13:29:13 +05303259 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003260 128, rss_key);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303261 if (rc) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303262 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303263 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00003264 }
3265
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003266 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
Venkata Duvvurue2557872014-04-21 15:38:00 +05303267
Sathya Perla482c9e72011-06-29 23:33:17 +00003268 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003269 for_all_rx_queues(adapter, rxo, i)
Ajit Khapardec30d7262014-09-12 17:39:16 +05303270 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Sathya Perla889cd4b2010-05-30 23:33:45 +00003271 return 0;
3272}
3273
/* ndo_open handler: bring the interface up.
 * Creates RX queues, registers IRQs, arms all RX/TX completion queues and
 * event queues, enables NAPI/busy-poll, queries link state and starts the
 * TX queues. On any failure the partially initialized state is rolled back
 * via be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm the completion queues so HW starts posting completions */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Best-effort: a failed link query just leaves the carrier state
	 * unchanged; it is not treated as an open failure.
	 */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Only skyhawk supports VxLAN offloads; re-query offloaded ports */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3323
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003324static int be_setup_wol(struct be_adapter *adapter, bool enable)
3325{
3326 struct be_dma_mem cmd;
3327 int status = 0;
3328 u8 mac[ETH_ALEN];
3329
Joe Perchesc7bf7162015-03-02 19:54:47 -08003330 eth_zero_addr(mac);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003331
3332 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07003333 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3334 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05303335 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05303336 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003337
3338 if (enable) {
3339 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05303340 PCICFG_PM_CONTROL_OFFSET,
3341 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003342 if (status) {
3343 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00003344 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003345 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3346 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003347 return status;
3348 }
3349 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303350 adapter->netdev->dev_addr,
3351 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003352 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3353 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3354 } else {
3355 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3356 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3357 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3358 }
3359
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003360 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003361 return status;
3362}
3363
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003364static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3365{
3366 u32 addr;
3367
3368 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3369
3370 mac[5] = (u8)(addr & 0xFF);
3371 mac[4] = (u8)((addr >> 8) & 0xFF);
3372 mac[3] = (u8)((addr >> 16) & 0xFF);
3373 /* Use the OUI from the current MAC address */
3374 memcpy(mac, adapter->netdev->dev_addr, 3);
3375}
3376
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns the status of the last FW command issued; a failure for one VF
 * does not stop the loop, so earlier VFs may still have been configured.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx uses pmac-add; newer chips set the MAC directly */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* NOTE(review): mac[5] wraps at 0xff without carrying into
		 * mac[4], so >255 VFs would repeat addresses — presumably
		 * num_vfs is always far below that; confirm against HW limits.
		 */
		mac[5] += 1;
	}
	return status;
}
3412
Sathya Perla4c876612013-02-03 20:30:11 +00003413static int be_vfs_mac_query(struct be_adapter *adapter)
3414{
3415 int status, vf;
3416 u8 mac[ETH_ALEN];
3417 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003418
3419 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303420 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3421 mac, vf_cfg->if_handle,
3422 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003423 if (status)
3424 return status;
3425 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3426 }
3427 return 0;
3428}
3429
/* Tear down SR-IOV state: disable SR-IOV in the PCI layer, destroy the
 * per-VF MAC filters and interfaces, and free the vf_cfg array.
 * If any VF is still assigned to a VM, only the bookkeeping is freed and
 * the VFs themselves are left enabled.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	/* Disable SR-IOV before destroying the per-VF FW objects */
	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Undo the MAC programming done in be_vf_eth_addr_config() */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3458
/* Destroy all driver queues (MCC, RX CQs, TX queues, event queues) in the
 * reverse order of their creation.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3466
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303467static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003468{
Sathya Perla191eb752012-02-23 18:50:13 +00003469 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3470 cancel_delayed_work_sync(&adapter->work);
3471 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3472 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303473}
3474
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003475static void be_cancel_err_detection(struct be_adapter *adapter)
3476{
3477 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3478 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3479 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3480 }
3481}
3482
Somnath Koturb05004a2013-12-05 12:08:16 +05303483static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303484{
Somnath Koturb05004a2013-12-05 12:08:16 +05303485 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003486 be_cmd_pmac_del(adapter, adapter->if_handle,
3487 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303488 kfree(adapter->pmac_id);
3489 adapter->pmac_id = NULL;
3490 }
3491}
3492
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload configuration: revert the interface from tunnel
 * mode, clear the FW VxLAN port, and strip the tunnel-offload feature
 * bits from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	/* Drop the encapsulation offload features from the stack's view */
	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303513
/* Compute how many queue pairs each VF should receive out of the PF pool.
 * Returns 1 (no RSS for VFs) when num_vfs is 0, in multi-channel mode, or
 * when the VF count reaches the per-port RSS interface limit.
 */
static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
{
	struct be_resources res = adapter->pool_res;
	u16 num_vf_qs = 1;

	/* Distribute the queue resources equally among the PF and it's VFs
	 * Do not distribute queue resources in multi-channel configuration.
	 */
	if (num_vfs && !be_is_mc(adapter)) {
		/* If number of VFs requested is 8 less than max supported,
		 * assign 8 queue pairs to the PF and divide the remaining
		 * resources evenly among the VFs
		 *
		 * NOTE(review): if res.max_rss_qs < 8 the u16 subtraction
		 * below wraps around — presumably the pool always reports
		 * at least 8 RSS queues on SR-IOV capable profiles; confirm.
		 */
		if (num_vfs < (be_max_vfs(adapter) - 8))
			num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
		else
			num_vf_qs = res.max_rss_qs / num_vfs;

		/* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
		 * interfaces per port. Provide RSS on VFs, only if number
		 * of VFs requested is less than MAX_RSS_IFACES limit.
		 */
		if (num_vfs >= MAX_RSS_IFACES)
			num_vf_qs = 1;
	}
	return num_vf_qs;
}
3541
/* Full teardown of adapter state: stop the worker, clear VFs, return the
 * SR-IOV resource pool to FW, disable VxLAN offloads, remove MAC filters,
 * destroy the interface and all queues, and disable MSI-X.
 * Counterpart of be_setup(). Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 num_vf_qs;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter,
					       pci_sriov_get_totalvfs(pdev));
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					num_vf_qs);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3578
Kalesh AP0700d812015-01-20 03:51:43 -05003579static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3580 u32 cap_flags, u32 vf)
3581{
3582 u32 en_flags;
Kalesh AP0700d812015-01-20 03:51:43 -05003583
3584 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3585 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003586 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
Kalesh AP0700d812015-01-20 03:51:43 -05003587
3588 en_flags &= cap_flags;
3589
Vasundhara Volam435452a2015-03-20 06:28:23 -04003590 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
Kalesh AP0700d812015-01-20 03:51:43 -05003591}
3592
/* Create a FW interface object for every VF.
 * On non-BE3 chips the per-VF capability flags are taken from the FW
 * profile (when available); otherwise a minimal default set is used.
 * Returns 0 or the first interface-creation error.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			/* Best-effort: on failure keep the default flags */
			status = be_cmd_get_profile_config(adapter, &res,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3626
/* Allocate the per-VF config array and mark each entry's if_handle and
 * pmac_id as "not yet created" (-1). Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3643
/* Configure SR-IOV VFs.
 * If VFs already exist from a previous driver load (old_vfs), their
 * interface handles and MACs are queried from FW; otherwise new interfaces
 * and seed MACs are created. Each VF is then granted filter-management
 * privilege (when possible), QoS and spoof-check state are set up, and
 * finally SR-IOV is enabled in the PCI layer for fresh setups.
 * On failure all VF state is rolled back via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs survived a driver reload: re-learn their FW state */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the current spoof-check setting (best-effort) */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3727
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303728/* Converting function_mode bits on BE3 to SH mc_type enums */
3729
3730static u8 be_convert_mc_type(u32 function_mode)
3731{
Suresh Reddy66064db2014-06-23 16:41:29 +05303732 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303733 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303734 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303735 return FLEX10;
3736 else if (function_mode & VNIC_MODE)
3737 return vNIC2;
3738 else if (function_mode & UMC_ENABLED)
3739 return UMC;
3740 else
3741 return MC_NONE;
3742}
3743
/* On BE2/BE3 FW does not suggest the supported limits */
/* Fill *res with driver-computed resource limits for BE2/BE3 chips,
 * since their FW does not report per-function limits. Also derives and
 * caches adapter->mc_type from function_mode.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res,
					  RESOURCE_LIMITS, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* One extra RX queue beyond the RSS set (the default queue) */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
				   BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	/* BEx chips never support a default RSS queue */
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3813
/* Reset the software setup state to its pre-setup defaults before a
 * (re)configuration pass. Command privileges default to MAX for the PF
 * and MIN for VFs; FW may adjust them later.
 */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->if_flags = 0;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}
3826
/* Query the SR-IOV resource pool from FW into adapter->pool_res, working
 * around BE3 FW that does not report max_vfs, and preferring the pci-dev
 * TotalVFs value when VFs survived a previous driver unload.
 * Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	return 0;
}
3858
/* Query the SR-IOV pool and, on Skyhawk with no pre-existing VFs, ask FW
 * to give the whole PF-pool to the PF (zero VFs for now). Failure to
 * redistribute is logged but not fatal.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	u16 num_vf_qs;
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		num_vf_qs = be_calculate_vf_qs(adapter, 0);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 num_vf_qs);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
3884
/* Populate adapter->res with this function's resource limits: computed
 * locally for BEx chips, queried from FW for Lancer/Skyhawk. Also decides
 * whether a separate non-RSS default RX queue is needed and clamps
 * cfg_num_qs to HW and platform limits.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
3935
/* Gather adapter configuration from FW: controller attributes, FW config,
 * log level (BEx only), ACPI WoL capability, port name, active profile
 * (PF only) and resource limits. Also allocates the pmac_id table sized
 * by the unicast-MAC limit. Returns 0 or a negative error.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (BEx_chip(adapter)) {
		/* Mirror the FW log level into the driver msg_enable mask */
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		/* Informational only; failure is ignored */
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
3977
Sathya Perla95046b92013-07-23 15:25:02 +05303978static int be_mac_setup(struct be_adapter *adapter)
3979{
3980 u8 mac[ETH_ALEN];
3981 int status;
3982
3983 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3984 status = be_cmd_get_perm_mac(adapter, mac);
3985 if (status)
3986 return status;
3987
3988 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3989 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3990 } else {
3991 /* Maybe the HW was reset; dev_addr must be re-programmed */
3992 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3993 }
3994
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003995 /* For BE3-R VFs, the PF programs the initial MAC address */
3996 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3997 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3998 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303999 return 0;
4000}
4001
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304002static void be_schedule_worker(struct be_adapter *adapter)
4003{
4004 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4005 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4006}
4007
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004008static void be_schedule_err_detection(struct be_adapter *adapter)
4009{
4010 schedule_delayed_work(&adapter->be_err_detection_work,
4011 msecs_to_jiffies(1000));
4012 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4013}
4014
Sathya Perla77071332013-08-27 16:57:34 +05304015static int be_setup_queues(struct be_adapter *adapter)
4016{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304017 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05304018 int status;
4019
4020 status = be_evt_queues_create(adapter);
4021 if (status)
4022 goto err;
4023
4024 status = be_tx_qs_create(adapter);
4025 if (status)
4026 goto err;
4027
4028 status = be_rx_cqs_create(adapter);
4029 if (status)
4030 goto err;
4031
4032 status = be_mcc_queues_create(adapter);
4033 if (status)
4034 goto err;
4035
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304036 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4037 if (status)
4038 goto err;
4039
4040 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4041 if (status)
4042 goto err;
4043
Sathya Perla77071332013-08-27 16:57:34 +05304044 return 0;
4045err:
4046 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4047 return status;
4048}
4049
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304050int be_update_queues(struct be_adapter *adapter)
4051{
4052 struct net_device *netdev = adapter->netdev;
4053 int status;
4054
4055 if (netif_running(netdev))
4056 be_close(netdev);
4057
4058 be_cancel_worker(adapter);
4059
4060 /* If any vectors have been shared with RoCE we cannot re-program
4061 * the MSIx table.
4062 */
4063 if (!adapter->num_msix_roce_vec)
4064 be_msix_disable(adapter);
4065
4066 be_clear_queues(adapter);
4067
4068 if (!msix_enabled(adapter)) {
4069 status = be_msix_enable(adapter);
4070 if (status)
4071 return status;
4072 }
4073
4074 status = be_setup_queues(adapter);
4075 if (status)
4076 return status;
4077
4078 be_schedule_worker(adapter);
4079
4080 if (netif_running(netdev))
4081 status = be_open(netdev);
4082
4083 return status;
4084}
4085
Sathya Perlaf7062ee2015-02-06 08:18:35 -05004086static inline int fw_major_num(const char *fw_ver)
4087{
4088 int fw_major = 0, i;
4089
4090 i = sscanf(fw_ver, "%d.", &fw_major);
4091 if (i != 1)
4092 return 0;
4093
4094 return fw_major;
4095}
4096
Sathya Perlaf962f842015-02-23 04:20:16 -05004097/* If any VFs are already enabled don't FLR the PF */
4098static bool be_reset_required(struct be_adapter *adapter)
4099{
4100 return pci_num_vf(adapter->pdev) ? false : true;
4101}
4102
/* Wait for the FW to be ready and perform the required initialization:
 * optional function-level reset (FLR), FW init handshake, and enabling
 * interrupts for other ULPs sharing this NIC function.
 * Returns 0 on success or the status of the first failing step.
 */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* Skipped when VFs exist — see be_reset_required() */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_all_error(adapter);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4134
Sathya Perla5fb379e2009-06-18 00:02:59 +00004135static int be_setup(struct be_adapter *adapter)
4136{
Sathya Perla39f1d942012-05-08 19:41:24 +00004137 struct device *dev = &adapter->pdev->dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004138 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004139
Sathya Perlaf962f842015-02-23 04:20:16 -05004140 status = be_func_init(adapter);
4141 if (status)
4142 return status;
4143
Sathya Perla30128032011-11-10 19:17:57 +00004144 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004145
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004146 if (!lancer_chip(adapter))
4147 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00004148
Vasundhara Volamace40af2015-03-04 00:44:34 -05004149 if (!BE2_chip(adapter) && be_physfn(adapter))
4150 be_alloc_sriov_res(adapter);
4151
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004152 status = be_get_config(adapter);
4153 if (status)
4154 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00004155
Somnath Koturc2bba3d2013-05-02 03:37:08 +00004156 status = be_msix_enable(adapter);
4157 if (status)
4158 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004159
Kalesh AP0700d812015-01-20 03:51:43 -05004160 status = be_if_create(adapter, &adapter->if_handle,
4161 be_if_cap_flags(adapter), 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004162 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004163 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004164
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304165 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4166 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05304167 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304168 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004169 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004170 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004171
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004172 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004173
Sathya Perla95046b92013-07-23 15:25:02 +05304174 status = be_mac_setup(adapter);
4175 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00004176 goto err;
4177
Kalesh APe97e3cd2014-07-17 16:20:26 +05304178 be_cmd_get_fw_ver(adapter);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304179 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00004180
Somnath Koture9e2a902013-10-24 14:37:53 +05304181 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
Vasundhara Volam50762662014-09-12 17:39:14 +05304182 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
Somnath Koture9e2a902013-10-24 14:37:53 +05304183 adapter->fw_ver);
4184 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4185 }
4186
Sathya Perla1d1e9a42012-06-05 19:37:17 +00004187 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00004188 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004189
4190 be_set_rx_mode(adapter->netdev);
4191
Kalesh AP00d594c2015-01-20 03:51:44 -05004192 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4193 adapter->rx_fc);
4194 if (status)
4195 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4196 &adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00004197
Kalesh AP00d594c2015-01-20 03:51:44 -05004198 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4199 adapter->tx_fc, adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004200
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304201 if (be_physfn(adapter))
4202 be_cmd_set_logical_link_config(adapter,
4203 IFLA_VF_LINK_STATE_AUTO, 0);
4204
Vasundhara Volambec84e62014-06-30 13:01:32 +05304205 if (adapter->num_vfs)
4206 be_vf_setup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004207
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004208 status = be_cmd_get_phy_info(adapter);
4209 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004210 adapter->phy.fc_autoneg = 1;
4211
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304212 be_schedule_worker(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05304213 adapter->flags |= BE_FLAGS_SETUP_DONE;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004214 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00004215err:
4216 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004217 return status;
4218}
4219
Ivan Vecera66268732011-12-08 01:31:21 +00004220#ifdef CONFIG_NET_POLL_CONTROLLER
4221static void be_netpoll(struct net_device *netdev)
4222{
4223 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004224 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00004225 int i;
4226
Sathya Perlae49cc342012-11-27 19:50:02 +00004227 for_all_evt_queues(adapter, eqo, i) {
4228 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
4229 napi_schedule(&eqo->napi);
4230 }
Ivan Vecera66268732011-12-08 01:31:21 +00004231}
4232#endif
4233
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304234static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004235
Sathya Perla306f1342011-08-02 19:57:45 +00004236static bool phy_flashing_required(struct be_adapter *adapter)
4237{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05004238 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004239 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00004240}
4241
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004242static bool is_comp_in_ufi(struct be_adapter *adapter,
4243 struct flash_section_info *fsec, int type)
4244{
4245 int i = 0, img_type = 0;
4246 struct flash_section_info_g2 *fsec_g2 = NULL;
4247
Sathya Perlaca34fe32012-11-06 17:48:56 +00004248 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004249 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4250
4251 for (i = 0; i < MAX_FLASH_COMP; i++) {
4252 if (fsec_g2)
4253 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4254 else
4255 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4256
4257 if (img_type == type)
4258 return true;
4259 }
4260 return false;
4261
4262}
4263
Jingoo Han4188e7d2013-08-05 18:02:02 +09004264static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304265 int header_size,
4266 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004267{
4268 struct flash_section_info *fsec = NULL;
4269 const u8 *p = fw->data;
4270
4271 p += header_size;
4272 while (p < (fw->data + fw->size)) {
4273 fsec = (struct flash_section_info *)p;
4274 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4275 return fsec;
4276 p += 32;
4277 }
4278 return NULL;
4279}
4280
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304281static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4282 u32 img_offset, u32 img_size, int hdr_size,
4283 u16 img_optype, bool *crc_match)
4284{
4285 u32 crc_offset;
4286 int status;
4287 u8 crc[4];
4288
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004289 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4290 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304291 if (status)
4292 return status;
4293
4294 crc_offset = hdr_size + img_offset + img_size - 4;
4295
4296 /* Skip flashing, if crc of flashed region matches */
4297 if (!memcmp(crc, p + crc_offset, 4))
4298 *crc_match = true;
4299 else
4300 *crc_match = false;
4301
4302 return status;
4303}
4304
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004305static int be_flash(struct be_adapter *adapter, const u8 *img,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004306 struct be_dma_mem *flash_cmd, int optype, int img_size,
4307 u32 img_offset)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004308{
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004309 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004310 struct be_cmd_write_flashrom *req = flash_cmd->va;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304311 int status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004312
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004313 while (total_bytes) {
4314 num_bytes = min_t(u32, 32*1024, total_bytes);
4315
4316 total_bytes -= num_bytes;
4317
4318 if (!total_bytes) {
4319 if (optype == OPTYPE_PHY_FW)
4320 flash_op = FLASHROM_OPER_PHY_FLASH;
4321 else
4322 flash_op = FLASHROM_OPER_FLASH;
4323 } else {
4324 if (optype == OPTYPE_PHY_FW)
4325 flash_op = FLASHROM_OPER_PHY_SAVE;
4326 else
4327 flash_op = FLASHROM_OPER_SAVE;
4328 }
4329
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00004330 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004331 img += num_bytes;
4332 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004333 flash_op, img_offset +
4334 bytes_sent, num_bytes);
Kalesh AP4c600052014-05-30 19:06:26 +05304335 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304336 optype == OPTYPE_PHY_FW)
4337 break;
4338 else if (status)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004339 return status;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004340
4341 bytes_sent += num_bytes;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004342 }
4343 return 0;
4344}
4345
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004346/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00004347static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304348 const struct firmware *fw,
4349 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00004350{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004351 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304352 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004353 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304354 int status, i, filehdr_size, num_comp;
4355 const struct flash_comp *pflashcomp;
4356 bool crc_match;
4357 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00004358
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004359 struct flash_comp gen3_flash_types[] = {
4360 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4361 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4362 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4363 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4364 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4365 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4366 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4367 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4368 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4369 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4370 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4371 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4372 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4373 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4374 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4375 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4376 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4377 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4378 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4379 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004380 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004381
4382 struct flash_comp gen2_flash_types[] = {
4383 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4384 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4385 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4386 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4387 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4388 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4389 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4390 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4391 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4392 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4393 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4394 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4395 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4396 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4397 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4398 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004399 };
4400
Sathya Perlaca34fe32012-11-06 17:48:56 +00004401 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004402 pflashcomp = gen3_flash_types;
4403 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08004404 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004405 } else {
4406 pflashcomp = gen2_flash_types;
4407 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08004408 num_comp = ARRAY_SIZE(gen2_flash_types);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004409 img_hdrs_size = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004410 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00004411
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004412 /* Get flash section info*/
4413 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4414 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304415 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004416 return -1;
4417 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004418 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004419 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004420 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004421
4422 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4423 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4424 continue;
4425
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004426 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4427 !phy_flashing_required(adapter))
4428 continue;
4429
4430 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304431 status = be_check_flash_crc(adapter, fw->data,
4432 pflashcomp[i].offset,
4433 pflashcomp[i].size,
4434 filehdr_size +
4435 img_hdrs_size,
4436 OPTYPE_REDBOOT, &crc_match);
4437 if (status) {
4438 dev_err(dev,
4439 "Could not get CRC for 0x%x region\n",
4440 pflashcomp[i].optype);
4441 continue;
4442 }
4443
4444 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00004445 continue;
4446 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004447
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304448 p = fw->data + filehdr_size + pflashcomp[i].offset +
4449 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00004450 if (p + pflashcomp[i].size > fw->data + fw->size)
4451 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004452
4453 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004454 pflashcomp[i].size, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004455 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304456 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004457 pflashcomp[i].img_type);
4458 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00004459 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004460 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004461 return 0;
4462}
4463
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304464static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4465{
4466 u32 img_type = le32_to_cpu(fsec_entry.type);
4467 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4468
4469 if (img_optype != 0xFFFF)
4470 return img_optype;
4471
4472 switch (img_type) {
4473 case IMAGE_FIRMWARE_iSCSI:
4474 img_optype = OPTYPE_ISCSI_ACTIVE;
4475 break;
4476 case IMAGE_BOOT_CODE:
4477 img_optype = OPTYPE_REDBOOT;
4478 break;
4479 case IMAGE_OPTION_ROM_ISCSI:
4480 img_optype = OPTYPE_BIOS;
4481 break;
4482 case IMAGE_OPTION_ROM_PXE:
4483 img_optype = OPTYPE_PXE_BIOS;
4484 break;
4485 case IMAGE_OPTION_ROM_FCoE:
4486 img_optype = OPTYPE_FCOE_BIOS;
4487 break;
4488 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4489 img_optype = OPTYPE_ISCSI_BACKUP;
4490 break;
4491 case IMAGE_NCSI:
4492 img_optype = OPTYPE_NCSI_FW;
4493 break;
4494 case IMAGE_FLASHISM_JUMPVECTOR:
4495 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4496 break;
4497 case IMAGE_FIRMWARE_PHY:
4498 img_optype = OPTYPE_SH_PHY_FW;
4499 break;
4500 case IMAGE_REDBOOT_DIR:
4501 img_optype = OPTYPE_REDBOOT_DIR;
4502 break;
4503 case IMAGE_REDBOOT_CONFIG:
4504 img_optype = OPTYPE_REDBOOT_CONFIG;
4505 break;
4506 case IMAGE_UFI_DIR:
4507 img_optype = OPTYPE_UFI_DIR;
4508 break;
4509 default:
4510 break;
4511 }
4512
4513 return img_optype;
4514}
4515
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004516static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304517 const struct firmware *fw,
4518 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004519{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004520 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004521 bool crc_match, old_fw_img, flash_offset_support = true;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304522 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004523 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304524 u32 img_offset, img_size, img_type;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004525 u16 img_optype, flash_optype;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304526 int status, i, filehdr_size;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304527 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004528
4529 filehdr_size = sizeof(struct flash_file_hdr_g3);
4530 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4531 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304532 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304533 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004534 }
4535
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004536retry_flash:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004537 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4538 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4539 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304540 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4541 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4542 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004543
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304544 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004545 continue;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004546
4547 if (flash_offset_support)
4548 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4549 else
4550 flash_optype = img_optype;
4551
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304552 /* Don't bother verifying CRC if an old FW image is being
4553 * flashed
4554 */
4555 if (old_fw_img)
4556 goto flash;
4557
4558 status = be_check_flash_crc(adapter, fw->data, img_offset,
4559 img_size, filehdr_size +
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004560 img_hdrs_size, flash_optype,
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304561 &crc_match);
Kalesh AP4c600052014-05-30 19:06:26 +05304562 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4563 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004564 /* The current FW image on the card does not support
4565 * OFFSET based flashing. Retry using older mechanism
4566 * of OPTYPE based flashing
4567 */
4568 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4569 flash_offset_support = false;
4570 goto retry_flash;
4571 }
4572
4573 /* The current FW image on the card does not recognize
4574 * the new FLASH op_type. The FW download is partially
4575 * complete. Reboot the server now to enable FW image
4576 * to recognize the new FLASH op_type. To complete the
4577 * remaining process, download the same FW again after
4578 * the reboot.
4579 */
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304580 dev_err(dev, "Flash incomplete. Reset the server\n");
4581 dev_err(dev, "Download FW image again after reset\n");
4582 return -EAGAIN;
4583 } else if (status) {
4584 dev_err(dev, "Could not get CRC for 0x%x region\n",
4585 img_optype);
4586 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004587 }
4588
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304589 if (crc_match)
4590 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004591
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304592flash:
4593 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004594 if (p + img_size > fw->data + fw->size)
4595 return -1;
4596
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004597 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4598 img_offset);
4599
4600 /* The current FW image on the card does not support OFFSET
4601 * based flashing. Retry using older mechanism of OPTYPE based
4602 * flashing
4603 */
4604 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4605 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4606 flash_offset_support = false;
4607 goto retry_flash;
4608 }
4609
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304610 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4611 * UFI_DIR region
4612 */
Kalesh AP4c600052014-05-30 19:06:26 +05304613 if (old_fw_img &&
4614 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4615 (img_optype == OPTYPE_UFI_DIR &&
4616 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304617 continue;
4618 } else if (status) {
4619 dev_err(dev, "Flashing section type 0x%x failed\n",
4620 img_type);
4621 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004622 }
4623 }
4624 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004625}
4626
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004627static int lancer_fw_download(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304628 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00004629{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004630#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4631#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
Kalesh APbb864e02014-09-02 09:56:51 +05304632 struct device *dev = &adapter->pdev->dev;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004633 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004634 const u8 *data_ptr = NULL;
4635 u8 *dest_image_ptr = NULL;
4636 size_t image_size = 0;
4637 u32 chunk_size = 0;
4638 u32 data_written = 0;
4639 u32 offset = 0;
4640 int status = 0;
4641 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004642 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004643
4644 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
Kalesh APbb864e02014-09-02 09:56:51 +05304645 dev_err(dev, "FW image size should be multiple of 4\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304646 return -EINVAL;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004647 }
4648
4649 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4650 + LANCER_FW_DOWNLOAD_CHUNK;
Kalesh APbb864e02014-09-02 09:56:51 +05304651 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00004652 &flash_cmd.dma, GFP_KERNEL);
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304653 if (!flash_cmd.va)
4654 return -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004655
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004656 dest_image_ptr = flash_cmd.va +
4657 sizeof(struct lancer_cmd_req_write_object);
4658 image_size = fw->size;
4659 data_ptr = fw->data;
4660
4661 while (image_size) {
4662 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4663
4664 /* Copy the image chunk content. */
4665 memcpy(dest_image_ptr, data_ptr, chunk_size);
4666
4667 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004668 chunk_size, offset,
4669 LANCER_FW_DOWNLOAD_LOCATION,
4670 &data_written, &change_status,
4671 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004672 if (status)
4673 break;
4674
4675 offset += data_written;
4676 data_ptr += data_written;
4677 image_size -= data_written;
4678 }
4679
4680 if (!status) {
4681 /* Commit the FW written */
4682 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004683 0, offset,
4684 LANCER_FW_DOWNLOAD_LOCATION,
4685 &data_written, &change_status,
4686 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004687 }
4688
Kalesh APbb864e02014-09-02 09:56:51 +05304689 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004690 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304691 dev_err(dev, "Firmware load error\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304692 return be_cmd_status(status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004693 }
4694
Kalesh APbb864e02014-09-02 09:56:51 +05304695 dev_info(dev, "Firmware flashed successfully\n");
4696
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004697 if (change_status == LANCER_FW_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304698 dev_info(dev, "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00004699 status = lancer_physdev_ctrl(adapter,
4700 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004701 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304702 dev_err(dev, "Adapter busy, could not reset FW\n");
4703 dev_err(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004704 }
4705 } else if (change_status != LANCER_NO_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304706 dev_info(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004707 }
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304708
4709 return 0;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004710}
4711
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004712#define BE2_UFI 2
4713#define BE3_UFI 3
4714#define BE3R_UFI 10
4715#define SH_UFI 4
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004716#define SH_P2_UFI 11
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004717
Sathya Perlaca34fe32012-11-06 17:48:56 +00004718static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004719 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004720{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004721 if (!fhdr) {
4722 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4723 return -1;
4724 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004725
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004726 /* First letter of the build version is used to identify
4727 * which chip this image file is meant for.
4728 */
4729 switch (fhdr->build[0]) {
4730 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004731 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4732 SH_UFI;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004733 case BLD_STR_UFI_TYPE_BE3:
4734 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4735 BE3_UFI;
4736 case BLD_STR_UFI_TYPE_BE2:
4737 return BE2_UFI;
4738 default:
4739 return -1;
4740 }
4741}
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004742
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004743/* Check if the flash image file is compatible with the adapter that
4744 * is being flashed.
4745 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004746 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004747 */
4748static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4749 struct flash_file_hdr_g3 *fhdr)
4750{
4751 int ufi_type = be_get_ufi_type(adapter, fhdr);
4752
4753 switch (ufi_type) {
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004754 case SH_P2_UFI:
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004755 return skyhawk_chip(adapter);
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004756 case SH_UFI:
4757 return (skyhawk_chip(adapter) &&
4758 adapter->asic_rev < ASIC_REV_P2);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004759 case BE3R_UFI:
4760 return BE3_chip(adapter);
4761 case BE3_UFI:
4762 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4763 case BE2_UFI:
4764 return BE2_chip(adapter);
4765 default:
4766 return false;
4767 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004768}
4769
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004770static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4771{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004772 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004773 struct flash_file_hdr_g3 *fhdr3;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004774 struct image_hdr *img_hdr_ptr;
4775 int status = 0, i, num_imgs;
Ajit Khaparde84517482009-09-04 03:12:16 +00004776 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004777
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004778 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4779 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4780 dev_err(dev, "Flash image is not compatible with adapter\n");
4781 return -EINVAL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004782 }
4783
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004784 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4785 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4786 GFP_KERNEL);
4787 if (!flash_cmd.va)
4788 return -ENOMEM;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004789
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004790 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4791 for (i = 0; i < num_imgs; i++) {
4792 img_hdr_ptr = (struct image_hdr *)(fw->data +
4793 (sizeof(struct flash_file_hdr_g3) +
4794 i * sizeof(struct image_hdr)));
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004795 if (!BE2_chip(adapter) &&
4796 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4797 continue;
4798
4799 if (skyhawk_chip(adapter))
4800 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4801 num_imgs);
4802 else
4803 status = be_flash_BEx(adapter, fw, &flash_cmd,
4804 num_imgs);
Ajit Khaparde84517482009-09-04 03:12:16 +00004805 }
4806
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004807 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4808 if (!status)
4809 dev_info(dev, "Firmware flashed successfully\n");
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004810
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004811 return status;
4812}
4813
4814int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4815{
4816 const struct firmware *fw;
4817 int status;
4818
4819 if (!netif_running(adapter->netdev)) {
4820 dev_err(&adapter->pdev->dev,
4821 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304822 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004823 }
4824
4825 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4826 if (status)
4827 goto fw_exit;
4828
4829 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4830
4831 if (lancer_chip(adapter))
4832 status = lancer_fw_download(adapter, fw);
4833 else
4834 status = be_fw_download(adapter, fw);
4835
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004836 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304837 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004838
Ajit Khaparde84517482009-09-04 03:12:16 +00004839fw_exit:
4840 release_firmware(fw);
4841 return status;
4842}
4843
Roopa Prabhuadd511b2015-01-29 22:40:12 -08004844static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4845 u16 flags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004846{
4847 struct be_adapter *adapter = netdev_priv(dev);
4848 struct nlattr *attr, *br_spec;
4849 int rem;
4850 int status = 0;
4851 u16 mode = 0;
4852
4853 if (!sriov_enabled(adapter))
4854 return -EOPNOTSUPP;
4855
4856 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
Thomas Graf4ea85e82014-11-26 13:42:18 +01004857 if (!br_spec)
4858 return -EINVAL;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004859
4860 nla_for_each_nested(attr, br_spec, rem) {
4861 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4862 continue;
4863
Thomas Grafb7c1a312014-11-26 13:42:17 +01004864 if (nla_len(attr) < sizeof(mode))
4865 return -EINVAL;
4866
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004867 mode = nla_get_u16(attr);
4868 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4869 return -EINVAL;
4870
4871 status = be_cmd_set_hsw_config(adapter, 0, 0,
4872 adapter->if_handle,
4873 mode == BRIDGE_MODE_VEPA ?
4874 PORT_FWD_TYPE_VEPA :
Kalesh APe7bcbd72015-05-06 05:30:32 -04004875 PORT_FWD_TYPE_VEB, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004876 if (status)
4877 goto err;
4878
4879 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4880 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4881
4882 return status;
4883 }
4884err:
4885 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4886 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4887
4888 return status;
4889}
4890
4891static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004892 struct net_device *dev, u32 filter_mask,
4893 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004894{
4895 struct be_adapter *adapter = netdev_priv(dev);
4896 int status = 0;
4897 u8 hsw_mode;
4898
4899 if (!sriov_enabled(adapter))
4900 return 0;
4901
4902 /* BE and Lancer chips support VEB mode only */
4903 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4904 hsw_mode = PORT_FWD_TYPE_VEB;
4905 } else {
4906 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04004907 adapter->if_handle, &hsw_mode,
4908 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004909 if (status)
4910 return 0;
4911 }
4912
4913 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4914 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004915 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004916 0, 0, nlflags);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004917}
4918
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304919#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004920/* VxLAN offload Notes:
4921 *
4922 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4923 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4924 * is expected to work across all types of IP tunnels once exported. Skyhawk
4925 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304926 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4927 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4928 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004929 *
4930 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4931 * adds more than one port, disable offloads and don't re-enable them again
4932 * until after all the tunnels are removed.
4933 */
Sathya Perlac9c47142014-03-27 10:46:19 +05304934static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4935 __be16 port)
4936{
4937 struct be_adapter *adapter = netdev_priv(netdev);
4938 struct device *dev = &adapter->pdev->dev;
4939 int status;
4940
4941 if (lancer_chip(adapter) || BEx_chip(adapter))
4942 return;
4943
4944 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
Sathya Perlac9c47142014-03-27 10:46:19 +05304945 dev_info(dev,
4946 "Only one UDP port supported for VxLAN offloads\n");
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004947 dev_info(dev, "Disabling VxLAN offloads\n");
4948 adapter->vxlan_port_count++;
4949 goto err;
Sathya Perlac9c47142014-03-27 10:46:19 +05304950 }
4951
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004952 if (adapter->vxlan_port_count++ >= 1)
4953 return;
4954
Sathya Perlac9c47142014-03-27 10:46:19 +05304955 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4956 OP_CONVERT_NORMAL_TO_TUNNEL);
4957 if (status) {
4958 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4959 goto err;
4960 }
4961
4962 status = be_cmd_set_vxlan_port(adapter, port);
4963 if (status) {
4964 dev_warn(dev, "Failed to add VxLAN port\n");
4965 goto err;
4966 }
4967 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4968 adapter->vxlan_port = port;
4969
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004970 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4971 NETIF_F_TSO | NETIF_F_TSO6 |
4972 NETIF_F_GSO_UDP_TUNNEL;
4973 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05304974 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004975
Sathya Perlac9c47142014-03-27 10:46:19 +05304976 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4977 be16_to_cpu(port));
4978 return;
4979err:
4980 be_disable_vxlan_offloads(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05304981}
4982
4983static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4984 __be16 port)
4985{
4986 struct be_adapter *adapter = netdev_priv(netdev);
4987
4988 if (lancer_chip(adapter) || BEx_chip(adapter))
4989 return;
4990
4991 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004992 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304993
4994 be_disable_vxlan_offloads(adapter);
4995
4996 dev_info(&adapter->pdev->dev,
4997 "Disabled VxLAN offloads for UDP port %d\n",
4998 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004999done:
5000 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05305001}
Joe Stringer725d5482014-11-13 16:38:13 -08005002
Jesse Gross5f352272014-12-23 22:37:26 -08005003static netdev_features_t be_features_check(struct sk_buff *skb,
5004 struct net_device *dev,
5005 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08005006{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305007 struct be_adapter *adapter = netdev_priv(dev);
5008 u8 l4_hdr = 0;
5009
5010 /* The code below restricts offload features for some tunneled packets.
5011 * Offload features for normal (non tunnel) packets are unchanged.
5012 */
5013 if (!skb->encapsulation ||
5014 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5015 return features;
5016
5017 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5018 * should disable tunnel offload features if it's not a VxLAN packet,
5019 * as tunnel offloads have been enabled only for VxLAN. This is done to
5020 * allow other tunneled traffic like GRE work fine while VxLAN
5021 * offloads are configured in Skyhawk-R.
5022 */
5023 switch (vlan_get_protocol(skb)) {
5024 case htons(ETH_P_IP):
5025 l4_hdr = ip_hdr(skb)->protocol;
5026 break;
5027 case htons(ETH_P_IPV6):
5028 l4_hdr = ipv6_hdr(skb)->nexthdr;
5029 break;
5030 default:
5031 return features;
5032 }
5033
5034 if (l4_hdr != IPPROTO_UDP ||
5035 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5036 skb->inner_protocol != htons(ETH_P_TEB) ||
5037 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5038 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
5039 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
5040
5041 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08005042}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305043#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05305044
stephen hemmingere5686ad2012-01-05 19:10:25 +00005045static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005046 .ndo_open = be_open,
5047 .ndo_stop = be_close,
5048 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00005049 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005050 .ndo_set_mac_address = be_mac_addr_set,
5051 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00005052 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005053 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005054 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
5055 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00005056 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00005057 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04005058 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00005059 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05305060 .ndo_set_vf_link_state = be_set_vf_link_state,
Kalesh APe7bcbd72015-05-06 05:30:32 -04005061 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
Ivan Vecera66268732011-12-08 01:31:21 +00005062#ifdef CONFIG_NET_POLL_CONTROLLER
5063 .ndo_poll_controller = be_netpoll,
5064#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005065 .ndo_bridge_setlink = be_ndo_bridge_setlink,
5066 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05305067#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05305068 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05305069#endif
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305070#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05305071 .ndo_add_vxlan_port = be_add_vxlan_port,
5072 .ndo_del_vxlan_port = be_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08005073 .ndo_features_check = be_features_check,
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305074#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005075};
5076
5077static void be_netdev_init(struct net_device *netdev)
5078{
5079 struct be_adapter *adapter = netdev_priv(netdev);
5080
Michał Mirosław6332c8d2011-04-07 02:43:48 +00005081 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00005082 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00005083 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00005084 if (be_multi_rxq(adapter))
5085 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00005086
5087 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00005088 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00005089
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07005090 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00005091 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00005092
Ajit Khapardefbc13f02012-03-18 06:23:21 +00005093 netdev->priv_flags |= IFF_UNICAST_FLT;
5094
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005095 netdev->flags |= IFF_MULTICAST;
5096
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00005097 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00005098
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005099 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005100
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00005101 netdev->ethtool_ops = &be_ethtool_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005102}
5103
Kalesh AP87ac1a52015-02-23 04:20:15 -05005104static void be_cleanup(struct be_adapter *adapter)
5105{
5106 struct net_device *netdev = adapter->netdev;
5107
5108 rtnl_lock();
5109 netif_device_detach(netdev);
5110 if (netif_running(netdev))
5111 be_close(netdev);
5112 rtnl_unlock();
5113
5114 be_clear(adapter);
5115}
5116
Kalesh AP484d76f2015-02-23 04:20:14 -05005117static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005118{
Kalesh APd0e1b312015-02-23 04:20:12 -05005119 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005120 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005121
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005122 status = be_setup(adapter);
5123 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005124 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005125
Kalesh APd0e1b312015-02-23 04:20:12 -05005126 if (netif_running(netdev)) {
5127 status = be_open(netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005128 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005129 return status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005130 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005131
Kalesh APd0e1b312015-02-23 04:20:12 -05005132 netif_device_attach(netdev);
5133
Kalesh AP484d76f2015-02-23 04:20:14 -05005134 return 0;
5135}
5136
5137static int be_err_recover(struct be_adapter *adapter)
5138{
5139 struct device *dev = &adapter->pdev->dev;
5140 int status;
5141
5142 status = be_resume(adapter);
5143 if (status)
5144 goto err;
5145
Sathya Perla9fa465c2015-02-23 04:20:13 -05005146 dev_info(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005147 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005148err:
Sathya Perla9fa465c2015-02-23 04:20:13 -05005149 if (be_physfn(adapter))
Somnath Kotur4bebb562013-12-05 12:07:55 +05305150 dev_err(dev, "Adapter recovery failed\n");
Sathya Perla9fa465c2015-02-23 04:20:13 -05005151 else
5152 dev_err(dev, "Re-trying adapter recovery\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005153
5154 return status;
5155}
5156
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005157static void be_err_detection_task(struct work_struct *work)
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005158{
5159 struct be_adapter *adapter =
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005160 container_of(work, struct be_adapter,
5161 be_err_detection_work.work);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005162 int status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005163
5164 be_detect_error(adapter);
5165
Kalesh APd0e1b312015-02-23 04:20:12 -05005166 if (adapter->hw_error) {
Kalesh AP87ac1a52015-02-23 04:20:15 -05005167 be_cleanup(adapter);
Kalesh APd0e1b312015-02-23 04:20:12 -05005168
5169 /* As of now error recovery support is in Lancer only */
5170 if (lancer_chip(adapter))
5171 status = be_err_recover(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005172 }
5173
Sathya Perla9fa465c2015-02-23 04:20:13 -05005174 /* Always attempt recovery on VFs */
5175 if (!status || be_virtfn(adapter))
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005176 be_schedule_err_detection(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005177}
5178
Vasundhara Volam21252372015-02-06 08:18:42 -05005179static void be_log_sfp_info(struct be_adapter *adapter)
5180{
5181 int status;
5182
5183 status = be_cmd_query_sfp_info(adapter);
5184 if (!status) {
5185 dev_err(&adapter->pdev->dev,
5186 "Unqualified SFP+ detected on %c from %s part no: %s",
5187 adapter->port_name, adapter->phy.vendor_name,
5188 adapter->phy.vendor_pn);
5189 }
5190 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5191}
5192
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005193static void be_worker(struct work_struct *work)
5194{
5195 struct be_adapter *adapter =
5196 container_of(work, struct be_adapter, work.work);
5197 struct be_rx_obj *rxo;
5198 int i;
5199
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005200 /* when interrupts are not yet enabled, just reap any pending
Sathya Perla78fad34e2015-02-23 04:20:08 -05005201 * mcc completions
5202 */
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005203 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00005204 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005205 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00005206 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005207 goto reschedule;
5208 }
5209
5210 if (!adapter->stats_cmd_sent) {
5211 if (lancer_chip(adapter))
5212 lancer_cmd_get_pport_stats(adapter,
Kalesh APcd3307aa2014-09-19 15:47:02 +05305213 &adapter->stats_cmd);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005214 else
5215 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5216 }
5217
Vasundhara Volamd696b5e2013-08-06 09:27:16 +05305218 if (be_physfn(adapter) &&
5219 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00005220 be_cmd_get_die_temperature(adapter);
5221
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005222 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6384a4d2013-10-25 10:40:16 +05305223 /* Replenish RX-queues starved due to memory
5224 * allocation failures.
5225 */
5226 if (rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05305227 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005228 }
5229
Sathya Perla2632baf2013-10-01 16:00:00 +05305230 be_eqd_update(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005231
Vasundhara Volam21252372015-02-06 08:18:42 -05005232 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5233 be_log_sfp_info(adapter);
5234
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005235reschedule:
5236 adapter->work_counter++;
5237 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5238}
5239
Sathya Perla78fad34e2015-02-23 04:20:08 -05005240static void be_unmap_pci_bars(struct be_adapter *adapter)
5241{
5242 if (adapter->csr)
5243 pci_iounmap(adapter->pdev, adapter->csr);
5244 if (adapter->db)
5245 pci_iounmap(adapter->pdev, adapter->db);
5246}
5247
/* Doorbell BAR number: BAR 0 on Lancer and on VFs, BAR 4 otherwise */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
5255
5256static int be_roce_map_pci_bars(struct be_adapter *adapter)
5257{
5258 if (skyhawk_chip(adapter)) {
5259 adapter->roce_db.size = 4096;
5260 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5261 db_bar(adapter));
5262 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5263 db_bar(adapter));
5264 }
5265 return 0;
5266}
5267
5268static int be_map_pci_bars(struct be_adapter *adapter)
5269{
David S. Miller0fa74a42015-03-20 18:51:09 -04005270 struct pci_dev *pdev = adapter->pdev;
Sathya Perla78fad34e2015-02-23 04:20:08 -05005271 u8 __iomem *addr;
5272 u32 sli_intf;
5273
5274 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5275 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5276 SLI_INTF_FAMILY_SHIFT;
5277 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
5278
5279 if (BEx_chip(adapter) && be_physfn(adapter)) {
David S. Miller0fa74a42015-03-20 18:51:09 -04005280 adapter->csr = pci_iomap(pdev, 2, 0);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005281 if (!adapter->csr)
5282 return -ENOMEM;
5283 }
5284
David S. Miller0fa74a42015-03-20 18:51:09 -04005285 addr = pci_iomap(pdev, db_bar(adapter), 0);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005286 if (!addr)
5287 goto pci_map_err;
5288 adapter->db = addr;
5289
David S. Miller0fa74a42015-03-20 18:51:09 -04005290 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5291 if (be_physfn(adapter)) {
5292 /* PCICFG is the 2nd BAR in BE2 */
5293 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5294 if (!addr)
5295 goto pci_map_err;
5296 adapter->pcicfg = addr;
5297 } else {
5298 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5299 }
5300 }
5301
Sathya Perla78fad34e2015-02-23 04:20:08 -05005302 be_roce_map_pci_bars(adapter);
5303 return 0;
5304
5305pci_map_err:
David S. Miller0fa74a42015-03-20 18:51:09 -04005306 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
Sathya Perla78fad34e2015-02-23 04:20:08 -05005307 be_unmap_pci_bars(adapter);
5308 return -ENOMEM;
5309}
5310
5311static void be_drv_cleanup(struct be_adapter *adapter)
5312{
5313 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5314 struct device *dev = &adapter->pdev->dev;
5315
5316 if (mem->va)
5317 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5318
5319 mem = &adapter->rx_filter;
5320 if (mem->va)
5321 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5322
5323 mem = &adapter->stats_cmd;
5324 if (mem->va)
5325 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5326}
5327
5328/* Allocate and initialize various fields in be_adapter struct */
5329static int be_drv_init(struct be_adapter *adapter)
5330{
5331 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
5332 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5333 struct be_dma_mem *rx_filter = &adapter->rx_filter;
5334 struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
5335 struct device *dev = &adapter->pdev->dev;
5336 int status = 0;
5337
5338 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
5339 mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
5340 &mbox_mem_alloc->dma,
5341 GFP_KERNEL);
5342 if (!mbox_mem_alloc->va)
5343 return -ENOMEM;
5344
5345 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5346 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5347 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
5348 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
5349
5350 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
5351 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
5352 &rx_filter->dma, GFP_KERNEL);
5353 if (!rx_filter->va) {
5354 status = -ENOMEM;
5355 goto free_mbox;
5356 }
5357
5358 if (lancer_chip(adapter))
5359 stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5360 else if (BE2_chip(adapter))
5361 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
5362 else if (BE3_chip(adapter))
5363 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5364 else
5365 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5366 stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
5367 &stats_cmd->dma, GFP_KERNEL);
5368 if (!stats_cmd->va) {
5369 status = -ENOMEM;
5370 goto free_rx_filter;
5371 }
5372
5373 mutex_init(&adapter->mbox_lock);
5374 spin_lock_init(&adapter->mcc_lock);
5375 spin_lock_init(&adapter->mcc_cq_lock);
5376 init_completion(&adapter->et_cmd_compl);
5377
5378 pci_save_state(adapter->pdev);
5379
5380 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005381 INIT_DELAYED_WORK(&adapter->be_err_detection_work,
5382 be_err_detection_task);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005383
5384 adapter->rx_fc = true;
5385 adapter->tx_fc = true;
5386
5387 /* Must be a power of 2 or else MODULO will BUG_ON */
5388 adapter->be_get_temp_freq = 64;
Sathya Perla78fad34e2015-02-23 04:20:08 -05005389
5390 return 0;
5391
5392free_rx_filter:
5393 dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
5394free_mbox:
5395 dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
5396 mbox_mem_alloc->dma);
5397 return status;
5398}
5399
5400static void be_remove(struct pci_dev *pdev)
5401{
5402 struct be_adapter *adapter = pci_get_drvdata(pdev);
5403
5404 if (!adapter)
5405 return;
5406
5407 be_roce_dev_remove(adapter);
5408 be_intr_set(adapter, false);
5409
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005410 be_cancel_err_detection(adapter);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005411
5412 unregister_netdev(adapter->netdev);
5413
5414 be_clear(adapter);
5415
5416 /* tell fw we're done with firing cmds */
5417 be_cmd_fw_clean(adapter);
5418
5419 be_unmap_pci_bars(adapter);
5420 be_drv_cleanup(adapter);
5421
5422 pci_disable_pcie_error_reporting(pdev);
5423
5424 pci_release_regions(pdev);
5425 pci_disable_device(pdev);
5426
5427 free_netdev(adapter->netdev);
5428}
5429
Sathya Perlad3791422012-09-28 04:39:44 +00005430static char *mc_name(struct be_adapter *adapter)
5431{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305432 char *str = ""; /* default */
5433
5434 switch (adapter->mc_type) {
5435 case UMC:
5436 str = "UMC";
5437 break;
5438 case FLEX10:
5439 str = "FLEX10";
5440 break;
5441 case vNIC1:
5442 str = "vNIC-1";
5443 break;
5444 case nPAR:
5445 str = "nPAR";
5446 break;
5447 case UFP:
5448 str = "UFP";
5449 break;
5450 case vNIC2:
5451 str = "vNIC-2";
5452 break;
5453 default:
5454 str = "";
5455 }
5456
5457 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005458}
5459
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5464
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005465static inline char *nic_name(struct pci_dev *pdev)
5466{
5467 switch (pdev->device) {
5468 case OC_DEVICE_ID1:
5469 return OC_NAME;
5470 case OC_DEVICE_ID2:
5471 return OC_NAME_BE;
5472 case OC_DEVICE_ID3:
5473 case OC_DEVICE_ID4:
5474 return OC_NAME_LANCER;
5475 case BE_DEVICE_ID2:
5476 return BE3_NAME;
5477 case OC_DEVICE_ID5:
5478 case OC_DEVICE_ID6:
5479 return OC_NAME_SH;
5480 default:
5481 return BE_NAME;
5482 }
5483}
5484
/* PCI probe callback: enables the device, maps its BARs, initializes
 * driver state, brings the adapter up and registers the netdev.
 * On failure, resources are unwound in reverse order via the goto labels.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* adapter lives in the netdev's private area */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is optional; failure to enable it is not fatal */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* start the periodic error-detection worker */
	be_schedule_err_detection(adapter);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5570
/* Legacy PM suspend hook: arm wake-on-LAN if enabled, quiesce the
 * adapter and put the PCI device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	/* the error-detection worker must not run while we are down */
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5588
/* Legacy PM resume hook: re-enable the PCI device, restore its state
 * and bring the adapter back up; undoes be_suspend().
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	/* restart the error-detection worker cancelled at suspend */
	be_schedule_err_detection(adapter);

	/* disarm wake-on-LAN now that we are fully up again */
	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5612
/*
 * Shutdown hook (reboot/poweroff). An FLR will stop BE from DMAing
 * any data, so it is enough to reset the function and disable the
 * device; no full teardown is performed.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	/* stop deferred work before detaching the netdev */
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5633
/* EEH/AER callback: a PCI channel error was detected. Quiesce the
 * adapter and tell the PCI core whether a slot reset should be tried.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* quiesce only once even if errors are reported repeatedly */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	/* permanent failure: no recovery is possible */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5665
/* EEH/AER callback: the slot was reset. Re-enable the device, restore
 * PCI state and wait for FW readiness before declaring recovery.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* clear stale AER status and driver-side error flags */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5692
/* EEH/AER callback: device has recovered; bring the adapter back up
 * and restart the error-detection worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5711
/* sysfs sriov_numvfs handler: enable num_vfs VFs (or disable all when
 * num_vfs == 0). Returns the number of VFs enabled, or a -ve errno.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	/* NOTE(review): VFs are cleared before the assigned-VF -EBUSY check
	 * below; presumably be_vf_clear() itself refuses to disable VFs that
	 * are assigned to VMs — confirm, else hoist the check above this call.
	 */
	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	/* NOTE(review): a be_vf_setup() failure falls through to return 0
	 * (i.e. "0 VFs enabled") rather than an error code — verify intended.
	 */
	return 0;
}
5765
/* PCI error-recovery (EEH/AER) callbacks for this driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5771
/* PCI driver glue: probe/remove, legacy PM hooks, shutdown, sysfs
 * SR-IOV configuration and the EEH/AER error handlers above.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
5783
5784static int __init be_init_module(void)
5785{
Joe Perches8e95a202009-12-03 07:58:21 +00005786 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5787 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005788 printk(KERN_WARNING DRV_NAME
5789 " : Module param rx_frag_size must be 2048/4096/8192."
5790 " Using 2048\n");
5791 rx_frag_size = 2048;
5792 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005793
Vasundhara Volamace40af2015-03-04 00:44:34 -05005794 if (num_vfs > 0) {
5795 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5796 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5797 }
5798
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005799 return pci_register_driver(&be_driver);
5800}
5801module_init(be_init_module);
5802
/* Module exit point: unregister the PCI driver; the core invokes
 * be_remove() for each bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);