blob: f15a3cfeb217bc3baffd7d4478973b330de5e500 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
/* Module identity reported via modinfo */
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size (bytes) of each buffer fragment used to hold received data;
 * read-only at runtime (S_IRUGO).
 */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
/* PCI device IDs this driver binds to; exported to userspace module
 * loaders via MODULE_DEVICE_TABLE below.
 */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable name for each bit position,
 * indexed by bit number (bit 0 = "CEV").
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
/* UE Status High CSR: human-readable name for each bit position,
 * indexed by bit number (bit 0 = "LPCMEMHOST").
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530160 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530193
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194 val |= qid & DB_RQ_RING_ID_MASK;
195 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000196
197 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199}
200
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
202 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203{
204 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530205
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000206 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000208
209 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000210 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211}
212
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400214 bool arm, bool clear_int, u16 num_popped,
215 u32 eq_delay_mult_enc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216{
217 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530218
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700219 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530220 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000221
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000222 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000223 return;
224
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225 if (arm)
226 val |= 1 << DB_EQ_REARM_SHIFT;
227 if (clear_int)
228 val |= 1 << DB_EQ_CLR_SHIFT;
229 val |= 1 << DB_EQ_EVNT_SHIFT;
230 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400231 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000232 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700233}
234
Sathya Perla8788fdc2009-07-27 22:52:03 +0000235void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700236{
237 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530238
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700239 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000240 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
241 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000242
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000243 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000244 return;
245
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246 if (arm)
247 val |= 1 << DB_CQ_REARM_SHIFT;
248 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000249 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700250}
251
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252static int be_mac_addr_set(struct net_device *netdev, void *p)
253{
254 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530255 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530257 int status;
258 u8 mac[ETH_ALEN];
259 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700260
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000261 if (!is_valid_ether_addr(addr->sa_data))
262 return -EADDRNOTAVAIL;
263
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530264 /* Proceed further only if, User provided MAC is different
265 * from active MAC
266 */
267 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
268 return 0;
269
Sathya Perla5a712c12013-07-23 15:24:59 +0530270 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
271 * privilege or if PF did not provision the new MAC address.
272 * On BE3, this cmd will always fail if the VF doesn't have the
273 * FILTMGMT privilege. This failure is OK, only if the PF programmed
274 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000275 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530276 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
277 adapter->if_handle, &adapter->pmac_id[0], 0);
278 if (!status) {
279 curr_pmac_id = adapter->pmac_id[0];
280
281 /* Delete the old programmed MAC. This call may fail if the
282 * old MAC was already deleted by the PF driver.
283 */
284 if (adapter->pmac_id[0] != old_pmac_id)
285 be_cmd_pmac_del(adapter, adapter->if_handle,
286 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000287 }
288
Sathya Perla5a712c12013-07-23 15:24:59 +0530289 /* Decide if the new MAC is successfully activated only after
290 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000291 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530292 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
293 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000294 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000295 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700296
Sathya Perla5a712c12013-07-23 15:24:59 +0530297 /* The MAC change did not happen, either due to lack of privilege
298 * or PF didn't pre-provision.
299 */
dingtianhong61d23e92013-12-30 15:40:43 +0800300 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530301 status = -EPERM;
302 goto err;
303 }
304
Somnath Koture3a7ae22011-10-27 07:14:05 +0000305 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530306 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000307 return 0;
308err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530309 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700310 return status;
311}
312
Sathya Perlaca34fe32012-11-06 17:48:56 +0000313/* BE2 supports only v0 cmd */
314static void *hw_stats_from_cmd(struct be_adapter *adapter)
315{
316 if (BE2_chip(adapter)) {
317 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500320 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000321 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
322
323 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500324 } else {
325 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
326
327 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000328 }
329}
330
331/* BE2 supports only v0 cmd */
332static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
333{
334 if (BE2_chip(adapter)) {
335 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500338 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000339 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
340
341 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500342 } else {
343 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
344
345 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000346 }
347}
348
349static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000351 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
352 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
353 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000354 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000355 &rxf_stats->port[adapter->port_num];
356 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000357
Sathya Perlaac124ff2011-07-25 19:10:14 +0000358 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000359 drvs->rx_pause_frames = port_stats->rx_pause_frames;
360 drvs->rx_crc_errors = port_stats->rx_crc_errors;
361 drvs->rx_control_frames = port_stats->rx_control_frames;
362 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
363 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
364 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
365 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
366 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
367 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
368 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
369 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
370 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
371 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
372 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000373 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000374 drvs->rx_dropped_header_too_small =
375 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000376 drvs->rx_address_filtered =
377 port_stats->rx_address_filtered +
378 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000379 drvs->rx_alignment_symbol_errors =
380 port_stats->rx_alignment_symbol_errors;
381
382 drvs->tx_pauseframes = port_stats->tx_pauseframes;
383 drvs->tx_controlframes = port_stats->tx_controlframes;
384
385 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000386 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000387 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000388 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000389 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000391 drvs->forwarded_packets = rxf_stats->forwarded_packets;
392 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000393 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
394 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
396}
397
Sathya Perlaca34fe32012-11-06 17:48:56 +0000398static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000399{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000400 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
401 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
402 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000403 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000404 &rxf_stats->port[adapter->port_num];
405 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000406
Sathya Perlaac124ff2011-07-25 19:10:14 +0000407 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000408 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
409 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000410 drvs->rx_pause_frames = port_stats->rx_pause_frames;
411 drvs->rx_crc_errors = port_stats->rx_crc_errors;
412 drvs->rx_control_frames = port_stats->rx_control_frames;
413 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
414 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
415 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
416 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
417 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
418 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
419 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
420 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
421 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
422 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
423 drvs->rx_dropped_header_too_small =
424 port_stats->rx_dropped_header_too_small;
425 drvs->rx_input_fifo_overflow_drop =
426 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000427 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000428 drvs->rx_alignment_symbol_errors =
429 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000430 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000431 drvs->tx_pauseframes = port_stats->tx_pauseframes;
432 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000433 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000434 drvs->jabber_events = port_stats->jabber_events;
435 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000436 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437 drvs->forwarded_packets = rxf_stats->forwarded_packets;
438 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000439 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
440 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000441 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
442}
443
Ajit Khaparde61000862013-10-03 16:16:33 -0500444static void populate_be_v2_stats(struct be_adapter *adapter)
445{
446 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
447 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
448 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
449 struct be_port_rxf_stats_v2 *port_stats =
450 &rxf_stats->port[adapter->port_num];
451 struct be_drv_stats *drvs = &adapter->drv_stats;
452
453 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
454 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
455 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
456 drvs->rx_pause_frames = port_stats->rx_pause_frames;
457 drvs->rx_crc_errors = port_stats->rx_crc_errors;
458 drvs->rx_control_frames = port_stats->rx_control_frames;
459 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
460 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
461 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
462 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
463 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
464 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
465 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
466 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
467 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
468 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
469 drvs->rx_dropped_header_too_small =
470 port_stats->rx_dropped_header_too_small;
471 drvs->rx_input_fifo_overflow_drop =
472 port_stats->rx_input_fifo_overflow_drop;
473 drvs->rx_address_filtered = port_stats->rx_address_filtered;
474 drvs->rx_alignment_symbol_errors =
475 port_stats->rx_alignment_symbol_errors;
476 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
477 drvs->tx_pauseframes = port_stats->tx_pauseframes;
478 drvs->tx_controlframes = port_stats->tx_controlframes;
479 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
480 drvs->jabber_events = port_stats->jabber_events;
481 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
482 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
483 drvs->forwarded_packets = rxf_stats->forwarded_packets;
484 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
485 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
486 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
487 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530488 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500489 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
490 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
491 drvs->rx_roce_frames = port_stats->roce_frames_received;
492 drvs->roce_drops_crc = port_stats->roce_drops_crc;
493 drvs->roce_drops_payload_len =
494 port_stats->roce_drops_payload_len;
495 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500496}
497
Selvin Xavier005d5692011-05-16 07:36:35 +0000498static void populate_lancer_stats(struct be_adapter *adapter)
499{
Selvin Xavier005d5692011-05-16 07:36:35 +0000500 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530501 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000502
503 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
504 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
505 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
506 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000507 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000508 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000509 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
510 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
511 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
512 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
513 drvs->rx_dropped_tcp_length =
514 pport_stats->rx_dropped_invalid_tcp_length;
515 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
516 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
517 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
518 drvs->rx_dropped_header_too_small =
519 pport_stats->rx_dropped_header_too_small;
520 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000521 drvs->rx_address_filtered =
522 pport_stats->rx_address_filtered +
523 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000524 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000525 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
527 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000528 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000529 drvs->forwarded_packets = pport_stats->num_forwards_lo;
530 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000531 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000532 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000533}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000534
Sathya Perla09c1c682011-08-22 19:41:53 +0000535static void accumulate_16bit_val(u32 *acc, u16 val)
536{
537#define lo(x) (x & 0xFFFF)
538#define hi(x) (x & 0xFFFF0000)
539 bool wrapped = val < lo(*acc);
540 u32 newacc = hi(*acc) + val;
541
542 if (wrapped)
543 newacc += 65536;
544 ACCESS_ONCE(*acc) = newacc;
545}
546
Jingoo Han4188e7d2013-08-05 18:02:02 +0900547static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530548 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000549{
550 if (!BEx_chip(adapter))
551 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
552 else
553 /* below erx HW counter can actually wrap around after
554 * 65535. Driver accumulates a 32-bit value
555 */
556 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
557 (u16)erx_stat);
558}
559
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000560void be_parse_stats(struct be_adapter *adapter)
561{
Ajit Khaparde61000862013-10-03 16:16:33 -0500562 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000563 struct be_rx_obj *rxo;
564 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000565 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000566
Sathya Perlaca34fe32012-11-06 17:48:56 +0000567 if (lancer_chip(adapter)) {
568 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000569 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000570 if (BE2_chip(adapter))
571 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500572 else if (BE3_chip(adapter))
573 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500575 else
576 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000577
Ajit Khaparde61000862013-10-03 16:16:33 -0500578 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000579 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000580 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
581 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000582 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000583 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000584}
585
/* ndo_get_stats64 handler: aggregates per-RX/TX-queue SW counters and
 * driver-maintained HW error counters into @stats.
 * Returns @stats for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* seqcount retry loop: re-read if the writer updated the
		 * 64-bit counters concurrently (needed on 32-bit arches)
		 */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
653
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700655{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700656 struct net_device *netdev = adapter->netdev;
657
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000659 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700661 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000662
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530663 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000664 netif_carrier_on(netdev);
665 else
666 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200667
668 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700669}
670
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500671static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700672{
Sathya Perla3c8def92011-06-12 20:01:58 +0000673 struct be_tx_stats *stats = tx_stats(txo);
674
Sathya Perlaab1594e2011-07-25 19:10:15 +0000675 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000676 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500677 stats->tx_bytes += skb->len;
678 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000679 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680}
681
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500682/* Returns number of WRBs needed for the skb */
683static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500685 /* +1 for the header wrb */
686 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700687}
688
689static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
690{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500691 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
692 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
693 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
694 wrb->rsvd0 = 0;
695}
696
697/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
698 * to avoid the swap and shift/mask operations in wrb_fill().
699 */
700static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
701{
702 wrb->frag_pa_hi = 0;
703 wrb->frag_pa_lo = 0;
704 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000705 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700706}
707
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000708static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530709 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000710{
711 u8 vlan_prio;
712 u16 vlan_tag;
713
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100714 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000715 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
716 /* If vlan priority provided by OS is NOT in available bmap */
717 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
718 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
719 adapter->recommended_prio;
720
721 return vlan_tag;
722}
723
Sathya Perlac9c47142014-03-27 10:46:19 +0530724/* Used only for IP tunnel packets */
725static u16 skb_inner_ip_proto(struct sk_buff *skb)
726{
727 return (inner_ip_hdr(skb)->version == 4) ?
728 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
729}
730
731static u16 skb_ip_proto(struct sk_buff *skb)
732{
733 return (ip_hdr(skb)->version == 4) ?
734 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
735}
736
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530737static inline bool be_is_txq_full(struct be_tx_obj *txo)
738{
739 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
740}
741
742static inline bool be_can_txq_wake(struct be_tx_obj *txo)
743{
744 return atomic_read(&txo->q.used) < txo->q.len / 2;
745}
746
747static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
748{
749 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
750}
751
/* Inspect @skb and record in @wrb_params the offload features (LSO,
 * checksum, VLAN) that the TX WRB header must advertise to the HW.
 */
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		/* non-Lancer chips need an explicit LSO6 hint for IPv6 TSO */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			/* tunnelled pkt: csum is for the inner headers */
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	/* CRC insertion is always requested */
	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500783
/* Translate the features collected in @wrb_params into the bit-fields of
 * the TX header WRB @hdr (still in CPU byte order; caller converts to LE).
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum offload requests */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* segmentation offload (LSO/LSO6) and its MSS */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	/* total WRB count and byte length of the request */
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}
818
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000819static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530820 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000821{
822 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500823 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000824
Sathya Perla7101e112010-03-22 20:41:12 +0000825
Sathya Perlaf986afc2015-02-06 08:18:43 -0500826 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
827 (u64)le32_to_cpu(wrb->frag_pa_lo);
828 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000829 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500830 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000831 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500832 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000833 }
834}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700835
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530836/* Grab a WRB header for xmit */
837static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700838{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530839 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700840
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530841 queue_head_inc(&txo->q);
842 return head;
843}
844
/* Set up the WRB header for xmit.
 * Fills the header WRB previously reserved at index @head, records the skb
 * for completion-time freeing, and accounts all of the packet's WRBs.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* HW expects the header in little-endian */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* slot must be free; skb is kept until TX completion */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700865
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530866/* Setup a WRB fragment (buffer descriptor) for xmit */
867static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
868 int len)
869{
870 struct be_eth_wrb *wrb;
871 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700872
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530873 wrb = queue_head_node(txq);
874 wrb_fill(wrb, busaddr, len);
875 queue_head_inc(txq);
876}
877
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind to the hdr WRB so we can walk this packet's frag WRBs */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		/* only the first frag (linear head) was single-mapped */
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* restore the producer index to before this packet */
	txq->head = head;
}
905
906/* Enqueue the given packet for transmit. This routine allocates WRBs for the
907 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
908 * of WRBs used up by the packet.
909 */
910static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
911 struct sk_buff *skb,
912 struct be_wrb_params *wrb_params)
913{
914 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
915 struct device *dev = &adapter->pdev->dev;
916 struct be_queue_info *txq = &txo->q;
917 bool map_single = false;
918 u16 head = txq->head;
919 dma_addr_t busaddr;
920 int len;
921
922 head = be_tx_get_wrb_hdr(txo);
923
924 if (skb->len > skb->data_len) {
925 len = skb_headlen(skb);
926
927 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
928 if (dma_mapping_error(dev, busaddr))
929 goto dma_err;
930 map_single = true;
931 be_tx_setup_wrb_frag(txo, busaddr, len);
932 copied += len;
933 }
934
935 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
936 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
937 len = skb_frag_size(frag);
938
939 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
940 if (dma_mapping_error(dev, busaddr))
941 goto dma_err;
942 be_tx_setup_wrb_frag(txo, busaddr, len);
943 copied += len;
944 }
945
946 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
947
948 be_tx_stats_update(txo, skb);
949 return wrb_cnt;
950
951dma_err:
952 adapter->drv_stats.dma_map_errors++;
953 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000954 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700955}
956
Sathya Perlaf7062ee2015-02-06 08:18:35 -0500957static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
958{
959 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
960}
961
/* Insert the VLAN tag(s) into the packet data in SW (instead of HW tagging).
 * May return a new skb, or NULL if the skb could not be un-shared/expanded;
 * the caller must not touch the original skb after a NULL return.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	/* we modify packet data below; get a private copy if shared */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now in the packet data, not out-of-band */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
1005
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001006static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1007{
1008 struct ethhdr *eh = (struct ethhdr *)skb->data;
1009 u16 offset = ETH_HLEN;
1010
1011 if (eh->h_proto == htons(ETH_P_IPV6)) {
1012 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1013
1014 offset += sizeof(struct ipv6hdr);
1015 if (ip6h->nexthdr != NEXTHDR_TCP &&
1016 ip6h->nexthdr != NEXTHDR_UDP) {
1017 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301018 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001019
1020 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1021 if (ehdr->hdrlen == 0xff)
1022 return true;
1023 }
1024 }
1025 return false;
1026}
1027
1028static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1029{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001030 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001031}
1032
Sathya Perla748b5392014-05-09 13:29:13 +05301033static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001034{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001035 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001036}
1037
/* Apply BEx/Lancer TX errata workarounds to @skb.
 * Returns the (possibly re-allocated) skb, or NULL if the skb was dropped
 * (drop path) or could not be modified (error path); in both NULL cases the
 * skb is already freed or consumed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		/* trim the pad so the frame length matches ip->tot_len */
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1106
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301107static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1108 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301109 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301110{
1111 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1112 * less may cause a transmit stall on that port. So the work-around is
1113 * to pad short packets (<= 32 bytes) to a 36-byte length.
1114 */
1115 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001116 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301117 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301118 }
1119
1120 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301121 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301122 if (!skb)
1123 return NULL;
1124 }
1125
1126 return skb;
1127}
1128
/* Ring the TX doorbell for all WRBs queued since the last flush.
 * Ensures the last request raises a completion event and, on non-Lancer
 * chips, pads an odd WRB count with a dummy WRB (HW requires even counts).
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* fold the dummy into the last request's num_wrb field */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1152
/* ndo_start_xmit handler: applies workarounds, builds WRBs for @skb and
 * rings the doorbell when no further skbs are expected (xmit_more clear)
 * or the subqueue had to be stopped. Always returns NETDEV_TX_OK; dropped
 * skbs are freed here and counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	/* notify HW only when the stack has no more skbs to hand us */
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		/* DMA mapping failed; queue state already restored */
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* stop the subqueue before it can overflow */
	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1191
1192static int be_change_mtu(struct net_device *netdev, int new_mtu)
1193{
1194 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301195 struct device *dev = &adapter->pdev->dev;
1196
1197 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1198 dev_info(dev, "MTU must be between %d and %d bytes\n",
1199 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001200 return -EINVAL;
1201 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301202
1203 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301204 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001205 netdev->mtu = new_mtu;
1206 return 0;
1207}
1208
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001209static inline bool be_in_all_promisc(struct be_adapter *adapter)
1210{
1211 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1212 BE_IF_FLAGS_ALL_PROMISCUOUS;
1213}
1214
1215static int be_set_vlan_promisc(struct be_adapter *adapter)
1216{
1217 struct device *dev = &adapter->pdev->dev;
1218 int status;
1219
1220 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1221 return 0;
1222
1223 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1224 if (!status) {
1225 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1226 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1227 } else {
1228 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1229 }
1230 return status;
1231}
1232
1233static int be_clear_vlan_promisc(struct be_adapter *adapter)
1234{
1235 struct device *dev = &adapter->pdev->dev;
1236 int status;
1237
1238 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1239 if (!status) {
1240 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1241 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1242 }
1243 return status;
1244}
1245
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	/* too many vids for HW filtering: fall back to VLAN promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* filtering works again: leave promiscuous mode */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1281
/* Add VLAN @vid to the interface's filter set.
 * Records the VID in the adapter's bitmap and re-programs the HW VLAN
 * filter table via be_vid_config(); rolls back the bookkeeping on failure.
 * NOTE(review): signature matches the .ndo_vlan_rx_add_vid callback —
 * @proto is unused here. Returns 0 on success or a be_vid_config() status.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	/* VID already programmed; nothing to do */
	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		/* HW programming failed: undo the bitmap/count update */
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}
1305
Patrick McHardy80d5c362013-04-19 02:04:28 +00001306static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001307{
1308 struct be_adapter *adapter = netdev_priv(netdev);
1309
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001310 /* Packets with VID 0 are always received by Lancer by default */
1311 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301312 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001313
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301314 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301315 adapter->vlans_added--;
1316
1317 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001318}
1319
/* Turn off all-promiscuous RX filtering in HW and clear the cached flag.
 * NOTE(review): the be_cmd_rx_filter() status is intentionally ignored;
 * the software flag is cleared regardless.
 */
static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1325
/* Turn on all-promiscuous RX filtering in HW and set the cached flag.
 * NOTE(review): unlike be_set_mc_promisc(), the flag is set even if the
 * firmware command fails — matches the existing driver behavior.
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1331
/* Enable multicast-promiscuous RX filtering, unless already enabled.
 * The cached flag is updated only when the firmware command succeeds.
 */
static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}
1343
1344static void be_set_mc_list(struct be_adapter *adapter)
1345{
1346 int status;
1347
1348 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1349 if (!status)
1350 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1351 else
1352 be_set_mc_promisc(adapter);
1353}
1354
/* Re-program the HW unicast MAC table from the netdev UC address list.
 * Deletes all previously-added secondary MACs, then re-adds the current
 * list; slot 0 of pmac_id[] is reserved for the primary MAC.  If more UC
 * addresses are configured than HW slots available, falls back to
 * all-promiscuous mode instead.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Delete the old secondary MACs; uc_macs counts down to 0 here */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Add each UC address; uc_macs is pre-incremented so the pmac_id
	 * index starts at slot 1 (slot 0 = primary MAC)
	 */
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1375
1376static void be_clear_uc_list(struct be_adapter *adapter)
1377{
1378 int i;
1379
1380 for (i = 1; i < (adapter->uc_macs + 1); i++)
1381 be_cmd_pmac_del(adapter, adapter->if_handle,
1382 adapter->pmac_id[i], 0);
1383 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301384}
1385
/* .ndo_set_rx_mode handler: synchronize the HW RX filters with the
 * netdev flags and address lists.  Order matters: promiscuous mode is
 * resolved first, then multicast-promiscuous fallback, then the UC and
 * MC address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		/* Promisc mode bypassed VLAN filtering; re-program it now */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Re-program UC MACs only if the list actually changed size */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1414
/* .ndo_set_vf_mac-style handler: set the MAC address of VF @vf.
 * On BEx chips the old pmac entry is deleted and a new one added; on
 * newer chips a single set_mac command is used.  The cached mac_addr is
 * updated only on success.  Returns 0, -EPERM if SR-IOV is disabled,
 * -EINVAL for a bad MAC/VF index, or a translated command status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BEx: replace the pmac entry (delete old, add new).
		 * VF domain IDs are 1-based, hence vf + 1.
		 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1454
/* .ndo_get_vf_config-style handler: report VF @vf's cached configuration
 * (MAC, VLAN/QoS, TX rate cap, link state, spoof-check) into @vi.
 * Returns 0, -EPERM if SR-IOV is disabled, or -EINVAL for a bad VF index.
 */
static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;	/* min rate is not supported by this driver */
	/* vlan_tag packs the 12-bit VID and the 3-bit priority together */
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
	vi->spoofchk = adapter->vf_cfg[vf].spoofchk;

	return 0;
}
1478
/* Enable Transparent VLAN Tagging (TVT) with tag @vlan on VF @vf.
 * Once TVT is on, any guest-programmed VLAN filters are cleared and the
 * VF's FILTMGMT privilege is revoked so the guest cannot re-add them.
 * Returns 0 on success (failures of the follow-up cleanup steps are
 * deliberately not propagated), or the hsw_config command status.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1507
/* Disable Transparent VLAN Tagging on VF @vf and restore the VF's
 * ability to manage its own VLAN filters (FILTMGMT privilege).
 * Returns 0 on success or the hsw_config command status; failure to
 * restore the privilege is logged-free and not propagated.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1534
/* .ndo_set_vf_vlan-style handler: configure (or clear) a transparent
 * VLAN tag for VF @vf.  A non-zero vlan/qos enables TVT with the packed
 * tag; vlan == 0 && qos == 0 disables it.  The cached vlan_tag is
 * updated only on success.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* Pack the 3-bit priority above the 12-bit VID */
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}
1564
/* .ndo_set_vf_rate-style handler: cap VF @vf's TX rate at @max_tx_rate
 * Mbps (0 clears the cap).  @min_tx_rate is not supported and must be 0.
 * A non-zero rate is validated against the current link speed, and on
 * Skyhawk must be a whole percentage of it.  The cached tx_rate is
 * updated only on success.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* min rate is not supported by this driver */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 = no cap; skip the link-speed validation entirely */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301626
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301627static int be_set_vf_link_state(struct net_device *netdev, int vf,
1628 int link_state)
1629{
1630 struct be_adapter *adapter = netdev_priv(netdev);
1631 int status;
1632
1633 if (!sriov_enabled(adapter))
1634 return -EPERM;
1635
1636 if (vf >= adapter->num_vfs)
1637 return -EINVAL;
1638
1639 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301640 if (status) {
1641 dev_err(&adapter->pdev->dev,
1642 "Link state change on VF %d failed: %#x\n", vf, status);
1643 return be_cmd_status(status);
1644 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301645
Kalesh APabccf232014-07-17 16:20:24 +05301646 adapter->vf_cfg[vf].plink_tracking = link_state;
1647
1648 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301649}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001650
/* .ndo_set_vf_spoofchk-style handler: enable/disable MAC spoof checking
 * on VF @vf via the hsw_config command.  Not supported on BEx chips.
 * The cached spoofchk flag is updated only on success.
 */
static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u8 spoofchk;
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter))
		return -EOPNOTSUPP;

	/* No change requested; avoid a needless firmware command */
	if (enable == vf_cfg->spoofchk)
		return 0;

	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
				       0, spoofchk);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Spoofchk change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->spoofchk = enable;
	return 0;
}
1683
/* Snapshot the current RX/TX packet counters and timestamp into the
 * adaptive-interrupt-coalescing state, to serve as the baseline for the
 * next rate calculation in be_get_new_eqd().
 */
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001691
/* Compute a new event-queue delay (EQD) for @eqo based on the aggregate
 * RX+TX packet rate since the last sample (adaptive interrupt
 * coalescing).  If AIC is disabled, returns the ethtool-configured
 * static delay.  The result is clamped to [aic->min_eqd, aic->max_eqd].
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		/* AIC off: reset the sample timestamp and use the static
		 * ethtool-configured delay
		 */
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum RX packet counts over all RX queues on this EQ; the
	 * u64_stats retry loop gives a consistent read on 32-bit hosts
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	/* Too soon since the last sample to compute a rate */
	if (delta == 0)
		return aic->prev_eqd;

	/* Packets-per-second across RX and TX, then scale into an EQD */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	/* Very low rates get no delay at all (lowest latency) */
	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
1752
/* For Skyhawk-R only */
/* Map the current adaptive EQ delay onto one of the R2I delay-multiplier
 * encodings (R2I_DLY_ENC_0..3).  Reuses the previous EQD if AIC was
 * sampled less than 1 ms ago; caches the chosen EQD in aic->prev_eqd.
 * Returns 0 (no delay encoding) when AIC is disabled.
 */
static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
	ulong now = jiffies;
	int eqd;
	u32 mult_enc;

	if (!aic->enable)
		return 0;

	/* Recompute at most once per millisecond */
	if (time_before_eq(now, aic->jiffies) ||
	    jiffies_to_msecs(now - aic->jiffies) < 1)
		eqd = aic->prev_eqd;
	else
		eqd = be_get_new_eqd(eqo);

	/* Larger delays map to smaller encodings (ENC_1 > ENC_2 > ENC_3) */
	if (eqd > 100)
		mult_enc = R2I_DLY_ENC_1;
	else if (eqd > 60)
		mult_enc = R2I_DLY_ENC_2;
	else if (eqd > 20)
		mult_enc = R2I_DLY_ENC_3;
	else
		mult_enc = R2I_DLY_ENC_0;

	aic->prev_eqd = eqd;

	return mult_enc;
}
1784
/* Re-evaluate the adaptive EQ delay for every event queue and push any
 * changed values to firmware in a single modify_eqd command.  With
 * @force_update all queues are programmed even if their EQD is
 * unchanged.
 */
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			/* Firmware takes the delay as a multiplier (~65%) */
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* Batch all changed EQs into one firmware command */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1806
/* Account one RX completion @rxcp into the per-queue stats of @rxo.
 * The u64_stats begin/end pair makes the multi-field update appear
 * atomic to readers on 32-bit hosts.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1822
Sathya Perla2e588f82011-03-11 02:49:26 +00001823static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001824{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001825 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301826 * Also ignore ipcksm for ipv6 pkts
1827 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001828 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301829 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001830}
1831
/* Consume the RX page-info entry at the queue tail and advance the ring.
 * Each mapped page is shared by multiple fragments: the full page is
 * DMA-unmapped only on its last fragment; earlier fragments just sync
 * their region for CPU access.  Returns the consumed entry.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last fragment of the page: unmap the whole page */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still in use by later frags: just sync this region */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1857
1858/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001859static void be_rx_compl_discard(struct be_rx_obj *rxo,
1860 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001861{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001862 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001863 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001864
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001865 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301866 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001867 put_page(page_info->page);
1868 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001869 }
1870}
1871
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * Tiny packets (<= BE_HDR_LEN) are fully copied into the skb's linear
 * area and the page released.  Larger frames get the Ethernet header
 * copied linear and the rest attached as page fragments; consecutive
 * fragments that share a physical page are coalesced into one frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Header-split: ETH header goes linear, payload stays in
		 * the page and is attached as frag[0]
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page moved to the skb (or was released) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref
			 * taken at posting time and extend the current slot
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1946
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* Out of skbs: count the drop and release the RX pages */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum verdict only when RXCSUM is enabled and
	 * the completion flags say it is reliable for this packet
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* Non-zero for packets decapsulated from a tunnel (e.g. VxLAN) */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1982
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-only (zero linear-length) skb from the posted RX page
 * fragments and feeds it to the GRO engine via napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frame and recycle its buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j indexes the skb frag being filled; starts at -1 so the first
	 * fragment always opens a new slot
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra page
			 * reference taken when the buffer was posted
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO is attempted only for frames hw validated (l4_csum passed),
	 * so the checksum is reported as already verified
	 */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2040
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002041static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2042 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002043{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302044 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2045 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2046 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2047 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2048 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2049 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2050 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2051 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2052 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2053 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2054 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002055 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302056 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2057 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002058 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302059 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302060 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302061 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002062}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002063
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002064static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2065 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002066{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302067 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2068 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2069 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2070 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2071 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2072 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2073 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2074 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2075 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2076 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2077 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002078 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302079 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2080 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002081 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302082 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2083 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002084}
2085
/* Fetch, parse and consume the next valid RX completion from the RX CQ.
 * Returns a pointer to the per-rxo parsed completion (rxo->rxcp), or NULL
 * when no valid entry is pending. The hw entry's valid bit is cleared so
 * the same entry is never processed twice.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3-native mode uses the v1 completion layout, otherwise v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* an IP fragment cannot carry a valid L4 checksum */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* hw reports the vlan tag byte-swapped on non-Lancer chips */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* do not indicate vlan when the tag matches the port's pvid
		 * and that vlan was not explicitly configured by the host
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2130
Eric Dumazet1829b082011-03-01 05:48:12 +00002131static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002132{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002133 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002134
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002135 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002136 gfp |= __GFP_COMP;
2137 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002138}
2139
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* a non-NULL page in the slot means the RXQ is full; stop early */
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* start a fresh big page, DMA-mapped once and then
			 * carved into rx_frag_size fragments below
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* another fragment of the current page; take an extra
			 * page reference for it
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* fill the hw RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			/* last frag holds the DMA handle used for unmapping
			 * the whole page later
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* ring the RX doorbell; each write is capped at
		 * MAX_NUM_POST_ERX_DB buffers
		 */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2222
/* Fetch, parse and consume the next valid TX completion from the TX CQ.
 * Returns a pointer to the per-txo parsed completion (txo->txcp), or NULL
 * when no valid entry is pending. The hw entry's valid bit is cleared so
 * the same entry is never processed twice.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* reset the valid bit; this CQ entry is now consumed */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2243
/* Walk the TXQ from its tail up to (and including) last_index, unmapping
 * each wrb's DMA buffer and freeing the completed skbs. Returns the number
 * of wrbs consumed (the caller subtracts this from txq->used).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		/* a non-NULL sent_skbs entry marks the hdr wrb that starts
		 * a new request
		 */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* NOTE(review): the bool arg presumably tells unmap_tx_frag
		 * that this wrb maps the skb's linear header; it is passed
		 * true only for the first frag wrb after the hdr wrb
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* free the skb of the last request in the range */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2277
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002278/* Return the number of events in the event queue */
2279static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002280{
2281 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002282 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002283
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002284 do {
2285 eqe = queue_tail_node(&eqo->q);
2286 if (eqe->evt == 0)
2287 break;
2288
2289 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002290 eqe->evt = 0;
2291 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002292 queue_tail_inc(&eqo->q);
2293 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002294
2295 return num;
2296}
2297
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002298/* Leaves the EQ is disarmed state */
2299static void be_eq_clean(struct be_eq_obj *eqo)
2300{
2301 int num = events_get(eqo);
2302
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002303 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002304}
2305
/* Drain the RX CQ and free all posted-but-unconsumed RX buffers.
 * Leaves the CQ unarmed and the RXQ indices reset to zero so the queue
 * can be cleanly destroyed/recreated.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* give up after ~10ms or on a detected hw error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* zero num_rcvd identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2355
/* Drain all TX completion queues, then reclaim any TX requests that were
 * queued but never notified to the HW. Called during queue teardown; all
 * TXQs end up empty with head/tail rewound to the last notified position.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* progress was made; restart the 10ms clock */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2420
/* Tear down all event queues: drain pending events, destroy the hw EQ,
 * remove the associated napi context, then free the cpumask and queue
 * memory. Safe on partially created EQs (only q.created ones touch hw).
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		free_cpumask_var(eqo->affinity_mask);
		be_queue_free(adapter, &eqo->q);
	}
}
2437
/* Create the event queues: one per IRQ, capped by the configured queue
 * count. Sets up napi, per-EQ interrupt-coalescing (aic) defaults and the
 * hw EQ itself. Returns 0 on success or a negative errno.
 * NOTE(review): on error this returns with earlier EQs still allocated;
 * presumably the caller unwinds via be_evt_queues_destroy() — confirm.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		/* pick a preferred CPU near the device's NUMA node for
		 * this EQ's affinity hint
		 */
		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
					    eqo->affinity_mask);

		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* adaptive interrupt coalescing enabled by default */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2475
Sathya Perla5fb379e2009-06-18 00:02:59 +00002476static void be_mcc_queues_destroy(struct be_adapter *adapter)
2477{
2478 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002479
Sathya Perla8788fdc2009-07-27 22:52:03 +00002480 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002481 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002482 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002483 be_queue_free(adapter, q);
2484
Sathya Perla8788fdc2009-07-27 22:52:03 +00002485 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002486 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002487 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002488 be_queue_free(adapter, q);
2489}
2490
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue and the MCC queue on top of it.
 * Returns 0 on success, -1 on failure; uses goto-chain cleanup so each
 * failure path undoes exactly what was created before it.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2523
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002524static void be_tx_queues_destroy(struct be_adapter *adapter)
2525{
2526 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002527 struct be_tx_obj *txo;
2528 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002529
Sathya Perla3c8def92011-06-12 20:01:58 +00002530 for_all_tx_queues(adapter, txo, i) {
2531 q = &txo->q;
2532 if (q->created)
2533 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2534 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002535
Sathya Perla3c8def92011-06-12 20:01:58 +00002536 q = &txo->cq;
2537 if (q->created)
2538 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2539 be_queue_free(adapter, q);
2540 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002541}
2542
/* Create the TX queues and their completion queues, binding each TX CQ to
 * an event queue (round-robin when there are fewer EQs than TXQs) and
 * setting XPS affinity per queue. Returns 0 on success or an error code.
 * NOTE(review): on error this returns with earlier queues still created;
 * presumably the caller unwinds via be_tx_queues_destroy() — confirm.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* steer transmits from the EQ's preferred CPUs to this txq */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2587
2588static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002589{
2590 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002591 struct be_rx_obj *rxo;
2592 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002593
Sathya Perla3abcded2010-10-03 22:12:27 -07002594 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002595 q = &rxo->cq;
2596 if (q->created)
2597 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2598 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002599 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002600}
2601
/* Size the RX queue set (RSS rings + optional default RXQ) and create a
 * completion queue for each RX object, distributing CQs across the
 * available EQs round-robin.
 * Returns 0 on success or a negative error code from queue alloc/create.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Spread RX CQs over the EQs round-robin */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2643
/* INTx interrupt handler (used when MSI-x is unavailable); 'dev' is the
 * EQ object registered in be_irq_register() — only EQ0 is used for INTx.
 * Counts pending events and hands processing off to NAPI.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2675
/* MSI-x interrupt handler; 'dev' is the per-vector EQ object passed to
 * request_irq(). Notifies the EQ (without re-arming; 0 events popped)
 * and schedules NAPI — event counting/re-arm happens in be_poll().
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2684
Sathya Perla2e588f82011-03-11 02:49:26 +00002685static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002686{
Somnath Koture38b1702013-05-29 22:55:56 +00002687 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002688}
2689
/* Drain up to 'budget' completions from an RX object's CQ, processing
 * each frame via GRO or the regular path. 'polling' distinguishes
 * NAPI_POLLING from BUSY_POLLING (GRO is skipped while busy-polling).
 * Notifies the CQ for the processed entries and replenishes RX frags if
 * the queue is running low. Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* Even discarded compls consume RX frags; track them for
		 * the replenish below
		 */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2749
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302750static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302751{
2752 switch (status) {
2753 case BE_TX_COMP_HDR_PARSE_ERR:
2754 tx_stats(txo)->tx_hdr_parse_err++;
2755 break;
2756 case BE_TX_COMP_NDMA_ERR:
2757 tx_stats(txo)->tx_dma_err++;
2758 break;
2759 case BE_TX_COMP_ACL_ERR:
2760 tx_stats(txo)->tx_spoof_check_err++;
2761 break;
2762 }
2763}
2764
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302765static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302766{
2767 switch (status) {
2768 case LANCER_TX_COMP_LSO_ERR:
2769 tx_stats(txo)->tx_tso_err++;
2770 break;
2771 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2772 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2773 tx_stats(txo)->tx_spoof_check_err++;
2774 break;
2775 case LANCER_TX_COMP_QINQ_ERR:
2776 tx_stats(txo)->tx_qinq_err++;
2777 break;
2778 case LANCER_TX_COMP_PARITY_ERR:
2779 tx_stats(txo)->tx_internal_parity_err++;
2780 break;
2781 case LANCER_TX_COMP_DMA_ERR:
2782 tx_stats(txo)->tx_dma_err++;
2783 break;
2784 }
2785}
2786
/* Drain all available TX completions of the given TX object ('idx' is
 * its netdev subqueue index): free the completed wrbs, account error
 * statuses, notify the CQ, and re-wake the subqueue if it had been
 * stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		/* Non-zero status indicates a TX error; chip families
		 * encode it differently
		 */
		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002821
#ifdef CONFIG_NET_RX_BUSY_POLL
/* The helpers below arbitrate an EQ between the NAPI poll path and the
 * busy-poll path using eqo->lock and the eqo->state flags
 * (BE_EQ_IDLE / BE_EQ_NAPI / BE_EQ_POLL plus the *_YIELD markers).
 * Only one of the two paths may own the EQ's RX queues at a time.
 */

/* Try to claim the EQ for NAPI processing. Returns false (and records
 * a NAPI yield) if the busy-poll path currently owns it.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release NAPI ownership of the EQ and return it to idle state */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-polling. Returns false (and records a
 * poll yield) if NAPI currently owns it.
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release busy-poll ownership of the EQ and return it to idle state */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the EQ's arbitration lock/state for busy-poll use */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until the busy-poll path has released the EQ, leaving it held
 * as BE_EQ_NAPI so be_busy_poll() can no longer claim it.
 */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Stubs when busy-poll is compiled out: NAPI always wins the EQ and
 * busy-poll can never claim it.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
2921
/* NAPI poll handler for one EQ: drains TX completions of the TXQs on
 * this EQ, processes RX (if the EQ isn't claimed by busy-poll), handles
 * MCC completions on the MCC EQ, and re-arms the EQ when under budget.
 * Returns the amount of RX work done (or 'budget' to stay in polling).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the EQ; report full budget so NAPI
		 * re-polls us instead of completing
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
2970
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll entry point (ndo_busy_poll): tries to claim the
 * EQ from NAPI and, if successful, polls each RX queue on the EQ for a
 * small batch (4) of completions, stopping at the first queue that had
 * work. Returns LL_FLUSH_BUSY when NAPI owns the EQ, else the amount of
 * work done.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2992
/* Probe the adapter's error registers and log any error found.
 * On Lancer the SLIPORT status/error registers are checked; on other
 * chips the PCICFG UE (unrecoverable error) status registers are read
 * and masked. Sets adapter->hw_error where the chip family warrants it
 * and takes the carrier down if any error was detected.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Nothing to do if an error was already flagged */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			error_detected = true;
			/* Do not log error messages if it's a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* Masked bits are not real errors; clear them */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log a description for every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
3066
Sathya Perla8d56ff12009-11-22 22:02:26 +00003067static void be_msix_disable(struct be_adapter *adapter)
3068{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003069 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003070 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003071 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303072 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003073 }
3074}
3075
/* Enable MSI-x for the adapter: request a vector range sized for the
 * NIC queues (and RoCE, when supported), then split the granted vectors
 * between the NIC and RoCE counts.
 * Returns 0 on success, and also 0 on failure for PFs (which can fall
 * back to INTx); returns the pci_enable_msix_range() error for VFs.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCE. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant fewer vectors than requested, down to MIN_MSIX_VECTORS */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Give RoCE half of the granted vectors, when there are spares */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3119
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003120static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303121 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003122{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303123 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003124}
3125
/* Request an MSI-x IRQ for every event queue (naming each after the
 * netdev and queue index) and set its CPU affinity hint.
 * On failure, frees the IRQs acquired so far, disables MSI-x, and
 * returns the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* Unwind: free the IRQs of the EQs that were registered (i-1..0) */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3151
/* Register the adapter's interrupt handler(s): MSI-x when enabled,
 * otherwise a shared INTx line bound to EQ0. VFs must use MSI-x, so a
 * VF fails out instead of falling back to INTx.
 * Sets adapter->isr_registered on success; returns 0 or an error code.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3179
3180static void be_irq_unregister(struct be_adapter *adapter)
3181{
3182 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003183 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003184 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003185
3186 if (!adapter->isr_registered)
3187 return;
3188
3189 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003190 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003191 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003192 goto done;
3193 }
3194
3195 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003196 for_all_evt_queues(adapter, eqo, i) {
3197 vec = be_msix_vec_get(adapter, eqo);
3198 irq_set_affinity_hint(vec, NULL);
3199 free_irq(vec, eqo);
3200 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003201
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003202done:
3203 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003204}
3205
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003206static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003207{
3208 struct be_queue_info *q;
3209 struct be_rx_obj *rxo;
3210 int i;
3211
3212 for_all_rx_queues(adapter, rxo, i) {
3213 q = &rxo->q;
3214 if (q->created) {
3215 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003216 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003217 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003218 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003219 }
3220}
3221
/* ndo_stop handler: quiesce and tear down the data path — disable NAPI
 * and busy-poll, stop async MCC, drain TX completions, destroy RX
 * queues, clean the EQs, and unregister the IRQs.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	be_clear_uc_list(adapter);

	/* Make sure no in-flight interrupt/NAPI work remains per EQ
	 * before cleaning it
	 */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3267
/* Allocate and create all RX rings and program the RSS configuration.
 * Creates one (optional) default non-RSS RXQ plus the RSS-capable RXQs,
 * fills the RSS indirection table round-robin across the RSS queues,
 * pushes a fresh RSS hash key to the FW and finally posts RX buffers.
 * Returns 0 on success or the first failing FW/alloc status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	/* Allocate ring memory for every RXQ first */
	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The default (non-RSS) RXQ is needed either when the FW requires it
	 * or when no RSS queues exist at all
	 */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Spread the indirection table entries evenly over the RSS
		 * queues, num_rss_qs entries at a time
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
					  RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		/* Remember that RSS is off so ethtool reports it correctly */
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Cache the key actually programmed, for ethtool get-rxfh */
	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3337
/* ndo_open handler: brings the interface up.
 * Creates the RX queues, registers IRQs, arms the TX/RX completion queues,
 * enables the async MCC path and NAPI, then starts the TX queues.
 * On any failure the partially-opened device is torn down via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	/* Enable NAPI and busy-poll, then un-mask the event interrupts */
	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Report the current link state to the stack */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Ask the vxlan driver to replay its UDP-port notifications so the
	 * offload state can be re-programmed (skyhawk only)
	 */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3387
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003388static int be_setup_wol(struct be_adapter *adapter, bool enable)
3389{
3390 struct be_dma_mem cmd;
3391 int status = 0;
3392 u8 mac[ETH_ALEN];
3393
Joe Perchesc7bf7162015-03-02 19:54:47 -08003394 eth_zero_addr(mac);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003395
3396 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07003397 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3398 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05303399 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05303400 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003401
3402 if (enable) {
3403 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05303404 PCICFG_PM_CONTROL_OFFSET,
3405 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003406 if (status) {
3407 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00003408 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003409 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3410 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003411 return status;
3412 }
3413 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303414 adapter->netdev->dev_addr,
3415 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003416 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3417 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3418 } else {
3419 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3420 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3421 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3422 }
3423
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003424 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003425 return status;
3426}
3427
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003428static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3429{
3430 u32 addr;
3431
3432 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3433
3434 mac[5] = (u8)(addr & 0xFF);
3435 mac[4] = (u8)((addr >> 8) & 0xFF);
3436 mac[3] = (u8)((addr >> 16) & 0xFF);
3437 /* Use the OUI from the current MAC address */
3438 memcpy(mac, adapter->netdev->dev_addr, 3);
3439}
3440
/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for the VFs are assigned incrementally starting from the
 * seed. These addresses are programmed in the ASIC by the PF, and the VF
 * driver queries for its MAC address during its probe.
 */
/* Program a MAC address for every VF, derived from the seed produced by
 * be_vf_eth_addr_generate(); consecutive VFs get consecutive addresses
 * (only the last octet is incremented). A per-VF failure is logged but
 * does not stop the loop; the status of the last FW command is returned.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips add a pmac entry; newer chips set the MAC directly */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			/* Cache the address the VF will query later */
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
3476
Sathya Perla4c876612013-02-03 20:30:11 +00003477static int be_vfs_mac_query(struct be_adapter *adapter)
3478{
3479 int status, vf;
3480 u8 mac[ETH_ALEN];
3481 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003482
3483 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303484 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3485 mac, vf_cfg->if_handle,
3486 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003487 if (status)
3488 return status;
3489 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3490 }
3491 return 0;
3492}
3493
/* Tear down all SR-IOV state. If VFs are still assigned to guest VMs,
 * SR-IOV is left enabled in the hardware and only the driver-local VF
 * config is released; otherwise the VF MACs and interfaces are destroyed
 * and SR-IOV is disabled.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips delete the pmac entry; newer chips clear the MAC */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3522
/* Destroy all queue objects: MCC first, then RX CQs, TX queues and,
 * last, the event queues the others post events to.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3530
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303531static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003532{
Sathya Perla191eb752012-02-23 18:50:13 +00003533 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3534 cancel_delayed_work_sync(&adapter->work);
3535 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3536 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303537}
3538
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003539static void be_cancel_err_detection(struct be_adapter *adapter)
3540{
3541 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3542 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3543 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3544 }
3545}
3546
Somnath Koturb05004a2013-12-05 12:08:16 +05303547static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303548{
Somnath Koturb05004a2013-12-05 12:08:16 +05303549 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003550 be_cmd_pmac_del(adapter, adapter->if_handle,
3551 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303552 kfree(adapter->pmac_id);
3553 adapter->pmac_id = NULL;
3554 }
3555}
3556
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload setup: convert the tunnel interface back to a
 * normal one, clear the FW's VxLAN UDP port, and strip the tunnel
 * offload features from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	/* Port 0 tells the FW that no VxLAN port is configured */
	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303577
Vasundhara Volamf2858732015-03-04 00:44:33 -05003578static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3579{
3580 struct be_resources res = adapter->pool_res;
3581 u16 num_vf_qs = 1;
3582
3583 /* Distribute the queue resources equally among the PF and it's VFs
3584 * Do not distribute queue resources in multi-channel configuration.
3585 */
3586 if (num_vfs && !be_is_mc(adapter)) {
3587 /* If number of VFs requested is 8 less than max supported,
3588 * assign 8 queue pairs to the PF and divide the remaining
3589 * resources evenly among the VFs
3590 */
3591 if (num_vfs < (be_max_vfs(adapter) - 8))
3592 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3593 else
3594 num_vf_qs = res.max_rss_qs / num_vfs;
3595
3596 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3597 * interfaces per port. Provide RSS on VFs, only if number
3598 * of VFs requested is less than MAX_RSS_IFACES limit.
3599 */
3600 if (num_vfs >= MAX_RSS_IFACES)
3601 num_vf_qs = 1;
3602 }
3603 return num_vf_qs;
3604}
3605
/* Undo everything set up by the driver's setup path: stop the worker,
 * remove VFs, hand SR-IOV resources back to an even FW distribution,
 * disable VxLAN offloads, delete MACs, and destroy the interface and
 * all queues. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 num_vf_qs;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter,
					       pci_sriov_get_totalvfs(pdev));
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					num_vf_qs);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3642
Kalesh AP0700d812015-01-20 03:51:43 -05003643static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3644 u32 cap_flags, u32 vf)
3645{
3646 u32 en_flags;
Kalesh AP0700d812015-01-20 03:51:43 -05003647
3648 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3649 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003650 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
Kalesh AP0700d812015-01-20 03:51:43 -05003651
3652 en_flags &= cap_flags;
3653
Vasundhara Volam435452a2015-03-20 06:28:23 -04003654 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
Kalesh AP0700d812015-01-20 03:51:43 -05003655}
3656
/* Create a FW interface object for each VF. On non-BE3 chips the
 * capability flags come from the per-VF FW profile when available
 * (note: on a profile-query failure the cap_flags from the previous
 * iteration, or the defaults, are reused). VLAN promiscuous mode is
 * never granted to a VF.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3690
Sathya Perla39f1d942012-05-08 19:41:24 +00003691static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003692{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003693 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003694 int vf;
3695
Sathya Perla39f1d942012-05-08 19:41:24 +00003696 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3697 GFP_KERNEL);
3698 if (!adapter->vf_cfg)
3699 return -ENOMEM;
3700
Sathya Perla11ac75e2011-12-13 00:58:50 +00003701 for_all_vfs(adapter, vf_cfg, vf) {
3702 vf_cfg->if_handle = -1;
3703 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003704 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003705 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003706}
3707
/* Main SR-IOV setup path. If VFs already exist (left over from a
 * previous driver load), their if-handles and MACs are queried from FW;
 * otherwise new interfaces and MACs are created. Each VF then gets
 * filter-management privileges, QoS, spoof-check state, and (for new
 * VFs) link-state config, before SR-IOV is enabled on the PCI device.
 * On failure, all VF state is rolled back via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* Re-adopt the existing VFs instead of re-creating them */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the spoof-check state the FW reports for this VF */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3791
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303792/* Converting function_mode bits on BE3 to SH mc_type enums */
3793
3794static u8 be_convert_mc_type(u32 function_mode)
3795{
Suresh Reddy66064db2014-06-23 16:41:29 +05303796 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303797 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303798 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303799 return FLEX10;
3800 else if (function_mode & VNIC_MODE)
3801 return vNIC2;
3802 else if (function_mode & UMC_ENABLED)
3803 return UMC;
3804 else
3805 return MC_NONE;
3806}
3807
/* On BE2/BE3 FW does not suggest the supported limits, so the driver
 * derives them from the chip type, multi-channel mode, SR-IOV state and
 * function capabilities.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	/* PFs get a larger unicast-MAC budget than VFs */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res,
					  RESOURCE_LIMITS, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* One extra RXQ beyond the RSS set (the default/non-RSS queue) */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
				   BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	/* BEx chips do not support an RSS-capable default RXQ */
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3878
Sathya Perla30128032011-11-10 19:17:57 +00003879static void be_setup_init(struct be_adapter *adapter)
3880{
3881 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003882 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003883 adapter->if_handle = -1;
3884 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003885 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003886 if (be_physfn(adapter))
3887 adapter->cmd_privileges = MAX_PRIVILEGES;
3888 else
3889 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003890}
3891
/* Query the PF-pool (SR-IOV) resource limits from FW and cache them in
 * adapter->pool_res. If VFs were left enabled by a previous driver load,
 * adopt that VF count and take TotalVFs from the pci-dev struct instead
 * of the pool limits. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	return 0;
}
3923
/* Prepare SR-IOV resource distribution at probe time: read the pool
 * limits, advertise the max VF count via sysfs, and (Skyhawk only, with
 * no pre-existing VFs) ask the FW to carve the PF pool for SR-IOV.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	u16 num_vf_qs;
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		num_vf_qs = be_calculate_vf_qs(adapter, 0);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 num_vf_qs);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
3949
/* Populate adapter->res with this function's queue/MAC/VLAN limits:
 * derived locally on BEx chips, queried from FW on Lancer/Skyhawk.
 * Also decides whether a separate default (non-RSS) RXQ is needed and
 * sanitizes the configured queue count.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
4000
/* Query controller attributes, FW config, WoL capability, port name and
 * resource limits from FW, caching them in the adapter.  Also allocates
 * the pmac_id table sized to the max supported unicast MACs.
 * Returns 0 on success, a FW-cmd error or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* On BEx the FW log level drives the default msg_enable mask */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	/* Only the physical function has an active profile to report */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
4042
Sathya Perla95046b92013-07-23 15:25:02 +05304043static int be_mac_setup(struct be_adapter *adapter)
4044{
4045 u8 mac[ETH_ALEN];
4046 int status;
4047
4048 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4049 status = be_cmd_get_perm_mac(adapter, mac);
4050 if (status)
4051 return status;
4052
4053 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4054 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4055 } else {
4056 /* Maybe the HW was reset; dev_addr must be re-programmed */
4057 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
4058 }
4059
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06004060 /* For BE3-R VFs, the PF programs the initial MAC address */
4061 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
4062 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
4063 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05304064 return 0;
4065}
4066
/* Arm the periodic (1 sec) worker and flag that it is scheduled */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4072
/* Arm the delayed (1 sec) error-detection task and flag it as scheduled */
static void be_schedule_err_detection(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4079
/* Create all HW queues (EQs, TXQs, RX CQs, MCC queues) in the required
 * order and publish the resulting RX/TX queue counts to the net stack.
 * Logs and returns the first error encountered; 0 on success.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4114
/* Tear down and re-create all queues with the current configuration.
 * Quiesces the device (close + cancel worker), re-programs MSI-X when
 * possible, rebuilds the queues and restores the previous running
 * state.  Returns 0 or the first error from MSI-X/queue/open setup.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4150
/* Extract the leading major number from a FW version string such as
 * "10.6.0.2".  Returns 0 when no integer can be parsed at the start.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4161
Sathya Perlaf962f842015-02-23 04:20:16 -05004162/* If any VFs are already enabled don't FLR the PF */
4163static bool be_reset_required(struct be_adapter *adapter)
4164{
4165 return pci_num_vf(adapter->pdev) ? false : true;
4166}
4167
/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* Skip the function-level reset when VFs are already enabled */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_all_error(adapter);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4199
/* Bring the adapter from reset to a fully configured state: FW init,
 * SR-IOV/resource discovery, MSI-X, interface and queue creation, MAC
 * and VLAN programming, flow control and VF setup.  On any failure all
 * progress is unwound via be_clear().  Returns 0 or the first error.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* SR-IOV resource provisioning; not applicable to BE2 or VFs */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Warn on BE2 cards with pre-4.0 FW */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* If the requested flow-control settings cannot be applied, read
	 * back what the HW actually uses.
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4284
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: notify every event queue and schedule its NAPI context
 * so completions are processed without relying on normal interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
4298
/* Magic cookie marking a flash section directory header in a FW (UFI)
 * image; compared as a raw 32-byte block (2 x 16, incl. NUL padding).
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004300
Sathya Perla306f1342011-08-02 19:57:45 +00004301static bool phy_flashing_required(struct be_adapter *adapter)
4302{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05004303 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004304 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00004305}
4306
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004307static bool is_comp_in_ufi(struct be_adapter *adapter,
4308 struct flash_section_info *fsec, int type)
4309{
4310 int i = 0, img_type = 0;
4311 struct flash_section_info_g2 *fsec_g2 = NULL;
4312
Sathya Perlaca34fe32012-11-06 17:48:56 +00004313 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004314 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4315
4316 for (i = 0; i < MAX_FLASH_COMP; i++) {
4317 if (fsec_g2)
4318 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4319 else
4320 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4321
4322 if (img_type == type)
4323 return true;
4324 }
4325 return false;
4326
4327}
4328
Jingoo Han4188e7d2013-08-05 18:02:02 +09004329static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304330 int header_size,
4331 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004332{
4333 struct flash_section_info *fsec = NULL;
4334 const u8 *p = fw->data;
4335
4336 p += header_size;
4337 while (p < (fw->data + fw->size)) {
4338 fsec = (struct flash_section_info *)p;
4339 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4340 return fsec;
4341 p += 32;
4342 }
4343 return NULL;
4344}
4345
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304346static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4347 u32 img_offset, u32 img_size, int hdr_size,
4348 u16 img_optype, bool *crc_match)
4349{
4350 u32 crc_offset;
4351 int status;
4352 u8 crc[4];
4353
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004354 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4355 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304356 if (status)
4357 return status;
4358
4359 crc_offset = hdr_size + img_offset + img_size - 4;
4360
4361 /* Skip flashing, if crc of flashed region matches */
4362 if (!memcmp(crc, p + crc_offset, 4))
4363 *crc_match = true;
4364 else
4365 *crc_match = false;
4366
4367 return status;
4368}
4369
/* Write one image region to flash in 32KB chunks through the MCC
 * write-flashrom command.  Intermediate chunks use a SAVE op and the
 * final chunk a FLASH (commit) op; PHY FW uses its own op codes.  For
 * PHY FW an ILLEGAL_REQUEST status aborts the loop without error;
 * any other failure is returned to the caller.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			/* last chunk: commit */
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
4410
/* For BE2, BE3 and BE3-R:
 * Flash every component present in the UFI image using fixed per-chip
 * layout tables (gen2 for BE2, gen3 for BE3/BE3-R).  Components are
 * skipped when absent from the section directory, not applicable (NCSI
 * on old FW, PHY FW when not required) or when the redboot CRC already
 * matches.  Returns 0, a FW-cmd error, or -1 on a malformed image.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		/* gen2 images carry no per-image headers */
		img_hdrs_size = 0;
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW is flashed only when the card FW version string
		 * compares >= "3.102.148.0"
		 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Skip re-flashing redboot when its CRC already matches */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
4528
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304529static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4530{
4531 u32 img_type = le32_to_cpu(fsec_entry.type);
4532 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4533
4534 if (img_optype != 0xFFFF)
4535 return img_optype;
4536
4537 switch (img_type) {
4538 case IMAGE_FIRMWARE_iSCSI:
4539 img_optype = OPTYPE_ISCSI_ACTIVE;
4540 break;
4541 case IMAGE_BOOT_CODE:
4542 img_optype = OPTYPE_REDBOOT;
4543 break;
4544 case IMAGE_OPTION_ROM_ISCSI:
4545 img_optype = OPTYPE_BIOS;
4546 break;
4547 case IMAGE_OPTION_ROM_PXE:
4548 img_optype = OPTYPE_PXE_BIOS;
4549 break;
4550 case IMAGE_OPTION_ROM_FCoE:
4551 img_optype = OPTYPE_FCOE_BIOS;
4552 break;
4553 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4554 img_optype = OPTYPE_ISCSI_BACKUP;
4555 break;
4556 case IMAGE_NCSI:
4557 img_optype = OPTYPE_NCSI_FW;
4558 break;
4559 case IMAGE_FLASHISM_JUMPVECTOR:
4560 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4561 break;
4562 case IMAGE_FIRMWARE_PHY:
4563 img_optype = OPTYPE_SH_PHY_FW;
4564 break;
4565 case IMAGE_REDBOOT_DIR:
4566 img_optype = OPTYPE_REDBOOT_DIR;
4567 break;
4568 case IMAGE_REDBOOT_CONFIG:
4569 img_optype = OPTYPE_REDBOOT_CONFIG;
4570 break;
4571 case IMAGE_UFI_DIR:
4572 img_optype = OPTYPE_UFI_DIR;
4573 break;
4574 default:
4575 break;
4576 }
4577
4578 return img_optype;
4579}
4580
/* Flash a Skyhawk UFI image: iterate the section directory, skipping
 * entries whose flashed CRC already matches.  Flashing is first tried
 * with the newer OFFSET-based op_type; if the on-card FW rejects it,
 * the whole loop is retried with per-image op_types (see retry_flash).
 * Returns 0, -EINVAL/-EFAULT/-1 on image or FW errors, or -EAGAIN when
 * the card must be rebooted to finish a partially complete download.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		/* entries with optype 0xFFFF come from old-style FW images */
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
				  img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4691
/* Download a firmware image to a Lancer adapter.
 *
 * The image is streamed to the FW object "/prg" in 32KB chunks through a
 * single reusable DMA buffer, then committed with a zero-length write.
 * Returns 0 on success (even if a manual reboot is still needed to
 * activate the new FW) or a negative errno on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW write-object commands operate on dword-sized payloads */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	/* Image bytes are copied just past the command header */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the FW actually accepted, which may be
		 * less than chunk_size
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write at the final
		 * offset tells the FW the download is complete
		 */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	/* change_status tells us how the new image gets activated */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			/* Reset failed; flash itself succeeded, so still
			 * return 0 and ask the user to reboot
			 */
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4776
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004777/* Check if the flash image file is compatible with the adapter that
4778 * is being flashed.
4779 */
4780static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4781 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004782{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004783 if (!fhdr) {
4784 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4785 return -1;
4786 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004787
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004788 /* First letter of the build version is used to identify
4789 * which chip this image file is meant for.
4790 */
4791 switch (fhdr->build[0]) {
4792 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004793 if (!skyhawk_chip(adapter))
4794 return false;
4795 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004796 case BLD_STR_UFI_TYPE_BE3:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004797 if (!BE3_chip(adapter))
4798 return false;
4799 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004800 case BLD_STR_UFI_TYPE_BE2:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004801 if (!BE2_chip(adapter))
4802 return false;
4803 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004804 default:
4805 return false;
4806 }
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004807
4808 return (fhdr->asic_type_rev >= adapter->asic_rev);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004809}
4810
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004811static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4812{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004813 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004814 struct flash_file_hdr_g3 *fhdr3;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004815 struct image_hdr *img_hdr_ptr;
4816 int status = 0, i, num_imgs;
Ajit Khaparde84517482009-09-04 03:12:16 +00004817 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004818
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004819 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4820 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4821 dev_err(dev, "Flash image is not compatible with adapter\n");
4822 return -EINVAL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004823 }
4824
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004825 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4826 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4827 GFP_KERNEL);
4828 if (!flash_cmd.va)
4829 return -ENOMEM;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004830
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004831 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4832 for (i = 0; i < num_imgs; i++) {
4833 img_hdr_ptr = (struct image_hdr *)(fw->data +
4834 (sizeof(struct flash_file_hdr_g3) +
4835 i * sizeof(struct image_hdr)));
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004836 if (!BE2_chip(adapter) &&
4837 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4838 continue;
4839
4840 if (skyhawk_chip(adapter))
4841 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4842 num_imgs);
4843 else
4844 status = be_flash_BEx(adapter, fw, &flash_cmd,
4845 num_imgs);
Ajit Khaparde84517482009-09-04 03:12:16 +00004846 }
4847
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004848 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4849 if (!status)
4850 dev_info(dev, "Firmware flashed successfully\n");
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004851
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004852 return status;
4853}
4854
4855int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4856{
4857 const struct firmware *fw;
4858 int status;
4859
4860 if (!netif_running(adapter->netdev)) {
4861 dev_err(&adapter->pdev->dev,
4862 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304863 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004864 }
4865
4866 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4867 if (status)
4868 goto fw_exit;
4869
4870 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4871
4872 if (lancer_chip(adapter))
4873 status = lancer_fw_download(adapter, fw);
4874 else
4875 status = be_fw_download(adapter, fw);
4876
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004877 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304878 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004879
Ajit Khaparde84517482009-09-04 03:12:16 +00004880fw_exit:
4881 release_firmware(fw);
4882 return status;
4883}
4884
/* ndo_bridge_setlink: program the embedded switch forwarding mode
 * (VEB or VEPA) from an IFLA_BRIDGE_MODE netlink attribute.
 * Only meaningful when SR-IOV is enabled.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		/* Malformed attribute: payload too small for a u16 */
		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* Only the first IFLA_BRIDGE_MODE attribute is honoured */
		return status;
	}
err:
	/* NOTE(review): if no IFLA_BRIDGE_MODE attribute is present the loop
	 * falls through to here with status == 0, logging a failure but
	 * returning success — preserved as-is.
	 */
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4931
4932static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004933 struct net_device *dev, u32 filter_mask,
4934 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004935{
4936 struct be_adapter *adapter = netdev_priv(dev);
4937 int status = 0;
4938 u8 hsw_mode;
4939
4940 if (!sriov_enabled(adapter))
4941 return 0;
4942
4943 /* BE and Lancer chips support VEB mode only */
4944 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4945 hsw_mode = PORT_FWD_TYPE_VEB;
4946 } else {
4947 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04004948 adapter->if_handle, &hsw_mode,
4949 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004950 if (status)
4951 return 0;
4952 }
4953
4954 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4955 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004956 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004957 0, 0, nlflags);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004958}
4959
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304960#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004961/* VxLAN offload Notes:
4962 *
4963 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4964 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4965 * is expected to work across all types of IP tunnels once exported. Skyhawk
4966 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304967 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4968 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4969 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004970 *
4971 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4972 * adds more than one port, disable offloads and don't re-enable them again
4973 * until after all the tunnels are removed.
4974 */
/* ndo_add_vxlan_port: enable VxLAN offloads for the first UDP port the
 * stack registers. Skyhawk supports offloads for exactly one port, so a
 * second port disables offloads entirely (see the note above this
 * function block) until the count drops back.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* No VxLAN offload support on Lancer/BE-x chips */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		/* A port is already offloaded: adding another one forces
		 * all offloads off. The count is still bumped so the del
		 * path balances.
		 */
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Offloads already disabled due to multiple ports: just count */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Advertise tunnel offload capabilities only now that the HW is
	 * actually configured for VxLAN
	 */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
5023
5024static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5025 __be16 port)
5026{
5027 struct be_adapter *adapter = netdev_priv(netdev);
5028
5029 if (lancer_chip(adapter) || BEx_chip(adapter))
5030 return;
5031
5032 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005033 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305034
5035 be_disable_vxlan_offloads(adapter);
5036
5037 dev_info(&adapter->pdev->dev,
5038 "Disabled VxLAN offloads for UDP port %d\n",
5039 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005040done:
5041 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05305042}
Joe Stringer725d5482014-11-13 16:38:13 -08005043
/* ndo_features_check: strip checksum/GSO offload bits from encapsulated
 * packets that are not standard VxLAN, since the HW has been configured
 * for VxLAN offloads only.
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		/* Non-IP tunnel: leave features alone */
		return features;
	}

	/* Accept only UDP-encapsulated Ethernet (ETH_P_TEB) whose inner MAC
	 * header sits exactly one UDP + VxLAN header past the transport
	 * header — i.e. a well-formed VxLAN frame.
	 */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305084#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05305085
/* net_device_ops implemented by be2net. Includes SR-IOV VF management
 * callbacks, bridge (embedded switch) configuration, busy-poll and
 * VxLAN offload hooks where the corresponding config options are set.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
	.ndo_set_vf_spoofchk    = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};
5117
/* Initialize netdev feature flags, ops and ethtool ops for this adapter */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-toggleable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX hashing only makes sense with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently-active features: everything toggleable plus VLAN RX
	 * stripping/filtering, which are always on
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5144
Kalesh AP87ac1a52015-02-23 04:20:15 -05005145static void be_cleanup(struct be_adapter *adapter)
5146{
5147 struct net_device *netdev = adapter->netdev;
5148
5149 rtnl_lock();
5150 netif_device_detach(netdev);
5151 if (netif_running(netdev))
5152 be_close(netdev);
5153 rtnl_unlock();
5154
5155 be_clear(adapter);
5156}
5157
Kalesh AP484d76f2015-02-23 04:20:14 -05005158static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005159{
Kalesh APd0e1b312015-02-23 04:20:12 -05005160 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005161 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005162
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005163 status = be_setup(adapter);
5164 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005165 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005166
Kalesh APd0e1b312015-02-23 04:20:12 -05005167 if (netif_running(netdev)) {
5168 status = be_open(netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005169 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005170 return status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005171 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005172
Kalesh APd0e1b312015-02-23 04:20:12 -05005173 netif_device_attach(netdev);
5174
Kalesh AP484d76f2015-02-23 04:20:14 -05005175 return 0;
5176}
5177
5178static int be_err_recover(struct be_adapter *adapter)
5179{
5180 struct device *dev = &adapter->pdev->dev;
5181 int status;
5182
5183 status = be_resume(adapter);
5184 if (status)
5185 goto err;
5186
Sathya Perla9fa465c2015-02-23 04:20:13 -05005187 dev_info(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005188 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005189err:
Sathya Perla9fa465c2015-02-23 04:20:13 -05005190 if (be_physfn(adapter))
Somnath Kotur4bebb562013-12-05 12:07:55 +05305191 dev_err(dev, "Adapter recovery failed\n");
Sathya Perla9fa465c2015-02-23 04:20:13 -05005192 else
5193 dev_err(dev, "Re-trying adapter recovery\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005194
5195 return status;
5196}
5197
/* Periodic work item that checks for HW errors and, where supported,
 * drives recovery. Re-arms itself unless a PF recovery attempt failed.
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
				container_of(work, struct be_adapter,
					     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}
5219
Vasundhara Volam21252372015-02-06 08:18:42 -05005220static void be_log_sfp_info(struct be_adapter *adapter)
5221{
5222 int status;
5223
5224 status = be_cmd_query_sfp_info(adapter);
5225 if (!status) {
5226 dev_err(&adapter->pdev->dev,
5227 "Unqualified SFP+ detected on %c from %s part no: %s",
5228 adapter->port_name, adapter->phy.vendor_name,
5229 adapter->phy.vendor_pn);
5230 }
5231 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5232}
5233
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005234static void be_worker(struct work_struct *work)
5235{
5236 struct be_adapter *adapter =
5237 container_of(work, struct be_adapter, work.work);
5238 struct be_rx_obj *rxo;
5239 int i;
5240
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005241 /* when interrupts are not yet enabled, just reap any pending
Sathya Perla78fad34e2015-02-23 04:20:08 -05005242 * mcc completions
5243 */
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005244 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00005245 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005246 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00005247 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005248 goto reschedule;
5249 }
5250
5251 if (!adapter->stats_cmd_sent) {
5252 if (lancer_chip(adapter))
5253 lancer_cmd_get_pport_stats(adapter,
Kalesh APcd3307aa2014-09-19 15:47:02 +05305254 &adapter->stats_cmd);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005255 else
5256 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5257 }
5258
Vasundhara Volamd696b5e2013-08-06 09:27:16 +05305259 if (be_physfn(adapter) &&
5260 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00005261 be_cmd_get_die_temperature(adapter);
5262
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005263 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6384a4d2013-10-25 10:40:16 +05305264 /* Replenish RX-queues starved due to memory
5265 * allocation failures.
5266 */
5267 if (rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05305268 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005269 }
5270
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04005271 /* EQ-delay update for Skyhawk is done while notifying EQ */
5272 if (!skyhawk_chip(adapter))
5273 be_eqd_update(adapter, false);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005274
Vasundhara Volam21252372015-02-06 08:18:42 -05005275 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5276 be_log_sfp_info(adapter);
5277
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005278reschedule:
5279 adapter->work_counter++;
5280 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5281}
5282
Sathya Perla78fad34e2015-02-23 04:20:08 -05005283static void be_unmap_pci_bars(struct be_adapter *adapter)
5284{
5285 if (adapter->csr)
5286 pci_iounmap(adapter->pdev, adapter->csr);
5287 if (adapter->db)
5288 pci_iounmap(adapter->pdev, adapter->db);
5289}
5290
/* BAR number holding the doorbell region: 0 for Lancer chips and VFs,
 * 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5298
5299static int be_roce_map_pci_bars(struct be_adapter *adapter)
5300{
5301 if (skyhawk_chip(adapter)) {
5302 adapter->roce_db.size = 4096;
5303 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5304 db_bar(adapter));
5305 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5306 db_bar(adapter));
5307 }
5308 return 0;
5309}
5310
/* Map the PCI BARs this chip family exposes (CSR, doorbell, PCICFG) and
 * record the SLI family / VF status from the SLI_INTF register.
 * On failure all BARs mapped so far are unmapped. Returns 0 or -ENOMEM.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR BAR (2) exists only on BE-x PFs */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			/* VFs reach PCICFG at a fixed offset in the
			 * doorbell BAR
			 */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5353
5354static void be_drv_cleanup(struct be_adapter *adapter)
5355{
5356 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5357 struct device *dev = &adapter->pdev->dev;
5358
5359 if (mem->va)
5360 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5361
5362 mem = &adapter->rx_filter;
5363 if (mem->va)
5364 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5365
5366 mem = &adapter->stats_cmd;
5367 if (mem->va)
5368 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5369}
5370
/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is the aligned view into the raw allocation above */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request layout differs per chip generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5442
/* PCI remove callback: tear everything down in the reverse order of
 * probe — stop error detection, unregister the netdev, release HW
 * resources, then the PCI device itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the recovery worker before pulling resources from under it */
	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5472
Sathya Perlad3791422012-09-28 04:39:44 +00005473static char *mc_name(struct be_adapter *adapter)
5474{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305475 char *str = ""; /* default */
5476
5477 switch (adapter->mc_type) {
5478 case UMC:
5479 str = "UMC";
5480 break;
5481 case FLEX10:
5482 str = "FLEX10";
5483 break;
5484 case vNIC1:
5485 str = "vNIC-1";
5486 break;
5487 case nPAR:
5488 str = "nPAR";
5489 break;
5490 case UFP:
5491 str = "UFP";
5492 break;
5493 case vNIC2:
5494 str = "vNIC-2";
5495 break;
5496 default:
5497 str = "";
5498 }
5499
5500 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005501}
5502
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5507
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005508static inline char *nic_name(struct pci_dev *pdev)
5509{
5510 switch (pdev->device) {
5511 case OC_DEVICE_ID1:
5512 return OC_NAME;
5513 case OC_DEVICE_ID2:
5514 return OC_NAME_BE;
5515 case OC_DEVICE_ID3:
5516 case OC_DEVICE_ID4:
5517 return OC_NAME_LANCER;
5518 case BE_DEVICE_ID2:
5519 return BE3_NAME;
5520 case OC_DEVICE_ID5:
5521 case OC_DEVICE_ID6:
5522 return OC_NAME_SH;
5523 default:
5524 return BE_NAME;
5525 }
5526}
5527
/* PCI probe callback: brings up a newly discovered device.
 * Resources are acquired in order (PCI device, regions, netdev, DMA
 * mask, BAR mappings, driver state, HW setup, netdev registration);
 * each failure unwinds everything acquired so far via the goto ladder.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: probe continues even if it cannot be enabled */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* Start periodic HW error monitoring */
	be_schedule_err_detection(adapter);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5613
/* Legacy PM suspend callback: quiesce the adapter and power the device
 * down.  WoL must be armed while the device is still accessible.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Arm wake-on-LAN in HW first, if the user enabled it */
	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	/* Stop the recovery worker before tearing down the data path */
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5631
/* Legacy PM resume callback: re-enable the PCI device, restore config
 * space and bring the adapter back up via be_resume().
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	/* Restart the recovery worker stopped in be_suspend() */
	be_schedule_err_detection(adapter);

	/* Disarm wake-on-LAN now that we are fully up again */
	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5655
/*
 * Shutdown callback.  An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	/* Stop background workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	/* Function-level reset stops all DMA from the device */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5676
/* EEH/AER callback: a PCI channel error was detected.  Quiesce the
 * driver and tell the PCI core whether a slot reset should be tried.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down only on the first error notification */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	/* Permanent failure: no point attempting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5708
/* EEH/AER callback: the slot has been reset.  Re-enable the device and
 * wait for FW readiness before recovery can proceed to be_eeh_resume().
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear stale AER status so future errors are reported afresh */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5735
/* EEH/AER callback: traffic may flow again.  Bring the adapter back up
 * and restart error monitoring.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5754
/* sriov_configure PCI callback: enable num_vfs VFs, or disable all VFs
 * when num_vfs is 0.  Returns the number of VFs enabled or -ve errno.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	/* NOTE(review): VFs are torn down before the pci_vfs_assigned()
	 * check below; presumably be_vf_clear() itself refuses to touch
	 * VFs that are assigned to guests -- confirm against its
	 * implementation.
	 */
	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		/* Redistribution failure is non-fatal: log and continue
		 * with the existing resource split.
		 */
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	/* NOTE(review): a be_vf_setup() failure falls through to the final
	 * "return 0" (reported as 0 VFs enabled) rather than returning an
	 * error code -- verify this is the intended contract.
	 */
	if (!status)
		return adapter->num_vfs;

	return 0;
}
5808
/* PCI error (EEH/AER) recovery callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5814
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005815static struct pci_driver be_driver = {
5816 .name = DRV_NAME,
5817 .id_table = be_dev_ids,
5818 .probe = be_probe,
5819 .remove = be_remove,
5820 .suspend = be_suspend,
Kalesh AP484d76f2015-02-23 04:20:14 -05005821 .resume = be_pci_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00005822 .shutdown = be_shutdown,
Vasundhara Volamace40af2015-03-04 00:44:34 -05005823 .sriov_configure = be_pci_sriov_configure,
Sathya Perlacf588472010-02-14 21:22:01 +00005824 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005825};
5826
5827static int __init be_init_module(void)
5828{
Joe Perches8e95a202009-12-03 07:58:21 +00005829 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5830 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005831 printk(KERN_WARNING DRV_NAME
5832 " : Module param rx_frag_size must be 2048/4096/8192."
5833 " Using 2048\n");
5834 rx_frag_size = 2048;
5835 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005836
Vasundhara Volamace40af2015-03-04 00:44:34 -05005837 if (num_vfs > 0) {
5838 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5839 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5840 }
5841
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005842 return pci_register_driver(&be_driver);
5843}
5844module_init(be_init_module);
5845
/* Module exit: unregister the driver (invokes be_remove() per device) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);