blob: c996dd76f5461253dc841ebf2fe539f813954b2b [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Vasundhara Volamace40af2015-03-04 00:44:34 -050033/* num_vfs module param is obsolete.
34 * Use sysfs method to enable/disable VFs.
35 */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000037module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000038MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039
Sathya Perla11ac75e2011-12-13 00:58:50 +000040static ushort rx_frag_size = 2048;
41module_param(rx_frag_size, ushort, S_IRUGO);
42MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
Benoit Taine9baa3c32014-08-08 15:56:03 +020044static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070046 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070047 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
48 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000051 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000052 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070053 { 0 }
54};
55MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530160 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530182 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530193
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530194 if (be_check_error(adapter, BE_ERROR_HW))
195 return;
196
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 val |= qid & DB_RQ_RING_ID_MASK;
198 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000199
200 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000201 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202}
203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
205 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206{
207 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530208
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530209 if (be_check_error(adapter, BE_ERROR_HW))
210 return;
211
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000212 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000214
215 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000216 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217}
218
Sathya Perla8788fdc2009-07-27 22:52:03 +0000219static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400220 bool arm, bool clear_int, u16 num_popped,
221 u32 eq_delay_mult_enc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222{
223 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530224
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530226 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000227
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530228 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000229 return;
230
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231 if (arm)
232 val |= 1 << DB_EQ_REARM_SHIFT;
233 if (clear_int)
234 val |= 1 << DB_EQ_CLR_SHIFT;
235 val |= 1 << DB_EQ_EVNT_SHIFT;
236 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400237 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000238 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700239}
240
Sathya Perla8788fdc2009-07-27 22:52:03 +0000241void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242{
243 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530244
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000246 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
247 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000248
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530249 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000250 return;
251
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252 if (arm)
253 val |= 1 << DB_CQ_REARM_SHIFT;
254 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000255 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256}
257
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700258static int be_mac_addr_set(struct net_device *netdev, void *p)
259{
260 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530261 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700262 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530263 int status;
264 u8 mac[ETH_ALEN];
265 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700266
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000267 if (!is_valid_ether_addr(addr->sa_data))
268 return -EADDRNOTAVAIL;
269
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530270 /* Proceed further only if, User provided MAC is different
271 * from active MAC
272 */
273 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
274 return 0;
275
Sathya Perla5a712c12013-07-23 15:24:59 +0530276 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
277 * privilege or if PF did not provision the new MAC address.
278 * On BE3, this cmd will always fail if the VF doesn't have the
279 * FILTMGMT privilege. This failure is OK, only if the PF programmed
280 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000281 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530282 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
283 adapter->if_handle, &adapter->pmac_id[0], 0);
284 if (!status) {
285 curr_pmac_id = adapter->pmac_id[0];
286
287 /* Delete the old programmed MAC. This call may fail if the
288 * old MAC was already deleted by the PF driver.
289 */
290 if (adapter->pmac_id[0] != old_pmac_id)
291 be_cmd_pmac_del(adapter, adapter->if_handle,
292 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000293 }
294
Sathya Perla5a712c12013-07-23 15:24:59 +0530295 /* Decide if the new MAC is successfully activated only after
296 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000297 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530298 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
299 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000300 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000301 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700302
Sathya Perla5a712c12013-07-23 15:24:59 +0530303 /* The MAC change did not happen, either due to lack of privilege
304 * or PF didn't pre-provision.
305 */
dingtianhong61d23e92013-12-30 15:40:43 +0800306 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530307 status = -EPERM;
308 goto err;
309 }
310
Somnath Koture3a7ae22011-10-27 07:14:05 +0000311 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530312 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000313 return 0;
314err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530315 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700316 return status;
317}
318
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319/* BE2 supports only v0 cmd */
320static void *hw_stats_from_cmd(struct be_adapter *adapter)
321{
322 if (BE2_chip(adapter)) {
323 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
324
325 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500326 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000327 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
328
329 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500330 } else {
331 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
332
333 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000334 }
335}
336
337/* BE2 supports only v0 cmd */
338static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
339{
340 if (BE2_chip(adapter)) {
341 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
342
343 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500344 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000345 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
346
347 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500348 } else {
349 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
350
351 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000352 }
353}
354
355static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000356{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000357 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
358 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
359 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000360 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000361 &rxf_stats->port[adapter->port_num];
362 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000363
Sathya Perlaac124ff2011-07-25 19:10:14 +0000364 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000365 drvs->rx_pause_frames = port_stats->rx_pause_frames;
366 drvs->rx_crc_errors = port_stats->rx_crc_errors;
367 drvs->rx_control_frames = port_stats->rx_control_frames;
368 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
369 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
370 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
371 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
372 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
373 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
374 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
375 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
376 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
377 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
378 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000379 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000380 drvs->rx_dropped_header_too_small =
381 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000382 drvs->rx_address_filtered =
383 port_stats->rx_address_filtered +
384 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 drvs->rx_alignment_symbol_errors =
386 port_stats->rx_alignment_symbol_errors;
387
388 drvs->tx_pauseframes = port_stats->tx_pauseframes;
389 drvs->tx_controlframes = port_stats->tx_controlframes;
390
391 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000392 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000393 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000394 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000397 drvs->forwarded_packets = rxf_stats->forwarded_packets;
398 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000399 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
400 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000401 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
402}
403
Sathya Perlaca34fe32012-11-06 17:48:56 +0000404static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000405{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000406 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
407 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
408 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000410 &rxf_stats->port[adapter->port_num];
411 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000412
Sathya Perlaac124ff2011-07-25 19:10:14 +0000413 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000414 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
415 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000416 drvs->rx_pause_frames = port_stats->rx_pause_frames;
417 drvs->rx_crc_errors = port_stats->rx_crc_errors;
418 drvs->rx_control_frames = port_stats->rx_control_frames;
419 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
420 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
421 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
422 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
423 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
424 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
425 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
426 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
427 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
428 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
429 drvs->rx_dropped_header_too_small =
430 port_stats->rx_dropped_header_too_small;
431 drvs->rx_input_fifo_overflow_drop =
432 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000433 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000434 drvs->rx_alignment_symbol_errors =
435 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000436 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437 drvs->tx_pauseframes = port_stats->tx_pauseframes;
438 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000439 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000440 drvs->jabber_events = port_stats->jabber_events;
441 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000442 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000443 drvs->forwarded_packets = rxf_stats->forwarded_packets;
444 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000445 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
446 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000447 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
448}
449
Ajit Khaparde61000862013-10-03 16:16:33 -0500450static void populate_be_v2_stats(struct be_adapter *adapter)
451{
452 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
453 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
454 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
455 struct be_port_rxf_stats_v2 *port_stats =
456 &rxf_stats->port[adapter->port_num];
457 struct be_drv_stats *drvs = &adapter->drv_stats;
458
459 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
460 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
461 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
462 drvs->rx_pause_frames = port_stats->rx_pause_frames;
463 drvs->rx_crc_errors = port_stats->rx_crc_errors;
464 drvs->rx_control_frames = port_stats->rx_control_frames;
465 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
466 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
467 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
468 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
469 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
470 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
471 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
472 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
473 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
474 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
475 drvs->rx_dropped_header_too_small =
476 port_stats->rx_dropped_header_too_small;
477 drvs->rx_input_fifo_overflow_drop =
478 port_stats->rx_input_fifo_overflow_drop;
479 drvs->rx_address_filtered = port_stats->rx_address_filtered;
480 drvs->rx_alignment_symbol_errors =
481 port_stats->rx_alignment_symbol_errors;
482 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
483 drvs->tx_pauseframes = port_stats->tx_pauseframes;
484 drvs->tx_controlframes = port_stats->tx_controlframes;
485 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
486 drvs->jabber_events = port_stats->jabber_events;
487 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
488 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
489 drvs->forwarded_packets = rxf_stats->forwarded_packets;
490 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
491 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
492 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
493 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530494 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500495 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
496 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
497 drvs->rx_roce_frames = port_stats->roce_frames_received;
498 drvs->roce_drops_crc = port_stats->roce_drops_crc;
499 drvs->roce_drops_payload_len =
500 port_stats->roce_drops_payload_len;
501 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500502}
503
Selvin Xavier005d5692011-05-16 07:36:35 +0000504static void populate_lancer_stats(struct be_adapter *adapter)
505{
Selvin Xavier005d5692011-05-16 07:36:35 +0000506 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530507 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000508
509 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
510 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
511 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
512 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000513 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000514 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000515 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
516 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
517 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
518 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
519 drvs->rx_dropped_tcp_length =
520 pport_stats->rx_dropped_invalid_tcp_length;
521 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
522 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
523 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
524 drvs->rx_dropped_header_too_small =
525 pport_stats->rx_dropped_header_too_small;
526 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000527 drvs->rx_address_filtered =
528 pport_stats->rx_address_filtered +
529 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000530 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000531 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000532 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
533 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000534 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000535 drvs->forwarded_packets = pport_stats->num_forwards_lo;
536 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000537 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000538 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000539}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000540
Sathya Perla09c1c682011-08-22 19:41:53 +0000541static void accumulate_16bit_val(u32 *acc, u16 val)
542{
543#define lo(x) (x & 0xFFFF)
544#define hi(x) (x & 0xFFFF0000)
545 bool wrapped = val < lo(*acc);
546 u32 newacc = hi(*acc) + val;
547
548 if (wrapped)
549 newacc += 65536;
550 ACCESS_ONCE(*acc) = newacc;
551}
552
Jingoo Han4188e7d2013-08-05 18:02:02 +0900553static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530554 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000555{
556 if (!BEx_chip(adapter))
557 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
558 else
559 /* below erx HW counter can actually wrap around after
560 * 65535. Driver accumulates a 32-bit value
561 */
562 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
563 (u16)erx_stat);
564}
565
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000566void be_parse_stats(struct be_adapter *adapter)
567{
Ajit Khaparde61000862013-10-03 16:16:33 -0500568 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000569 struct be_rx_obj *rxo;
570 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000571 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000572
Sathya Perlaca34fe32012-11-06 17:48:56 +0000573 if (lancer_chip(adapter)) {
574 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000575 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000576 if (BE2_chip(adapter))
577 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500578 else if (BE3_chip(adapter))
579 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000580 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500581 else
582 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000583
Ajit Khaparde61000862013-10-03 16:16:33 -0500584 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000585 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000586 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
587 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000588 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000589 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000590}
591
Sathya Perlaab1594e2011-07-25 19:10:15 +0000592static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530593 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700594{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000595 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000596 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700597 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000598 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000599 u64 pkts, bytes;
600 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700601 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700602
Sathya Perla3abcded2010-10-03 22:12:27 -0700603 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000604 const struct be_rx_stats *rx_stats = rx_stats(rxo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530605
Sathya Perlaab1594e2011-07-25 19:10:15 +0000606 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700607 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000608 pkts = rx_stats(rxo)->rx_pkts;
609 bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700610 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000611 stats->rx_packets += pkts;
612 stats->rx_bytes += bytes;
613 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
614 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
615 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700616 }
617
Sathya Perla3c8def92011-06-12 20:01:58 +0000618 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000619 const struct be_tx_stats *tx_stats = tx_stats(txo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530620
Sathya Perlaab1594e2011-07-25 19:10:15 +0000621 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700622 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000623 pkts = tx_stats(txo)->tx_pkts;
624 bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700625 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000626 stats->tx_packets += pkts;
627 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000628 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700629
630 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000631 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000632 drvs->rx_alignment_symbol_errors +
633 drvs->rx_in_range_errors +
634 drvs->rx_out_range_errors +
635 drvs->rx_frame_too_long +
636 drvs->rx_dropped_too_small +
637 drvs->rx_dropped_too_short +
638 drvs->rx_dropped_header_too_small +
639 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000640 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700641
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700642 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000643 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000644 drvs->rx_out_range_errors +
645 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000646
Sathya Perlaab1594e2011-07-25 19:10:15 +0000647 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700648
649 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000650 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000651
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 /* receiver fifo overrun */
653 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000654 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000655 drvs->rx_input_fifo_overflow_drop +
656 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000657 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700658}
659
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700661{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662 struct net_device *netdev = adapter->netdev;
663
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000664 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000665 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000666 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700667 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000668
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530669 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000670 netif_carrier_on(netdev);
671 else
672 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200673
674 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700675}
676
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500677static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
Sathya Perla3c8def92011-06-12 20:01:58 +0000679 struct be_tx_stats *stats = tx_stats(txo);
680
Sathya Perlaab1594e2011-07-25 19:10:15 +0000681 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000682 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500683 stats->tx_bytes += skb->len;
684 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000685 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700686}
687
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500688/* Returns number of WRBs needed for the skb */
689static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700690{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500691 /* +1 for the header wrb */
692 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693}
694
695static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
696{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500697 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
698 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
699 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
700 wrb->rsvd0 = 0;
701}
702
703/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
704 * to avoid the swap and shift/mask operations in wrb_fill().
705 */
706static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
707{
708 wrb->frag_pa_hi = 0;
709 wrb->frag_pa_lo = 0;
710 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000711 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700712}
713
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000714static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530715 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000716{
717 u8 vlan_prio;
718 u16 vlan_tag;
719
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100720 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000721 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
722 /* If vlan priority provided by OS is NOT in available bmap */
723 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
724 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
725 adapter->recommended_prio;
726
727 return vlan_tag;
728}
729
Sathya Perlac9c47142014-03-27 10:46:19 +0530730/* Used only for IP tunnel packets */
731static u16 skb_inner_ip_proto(struct sk_buff *skb)
732{
733 return (inner_ip_hdr(skb)->version == 4) ?
734 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
735}
736
737static u16 skb_ip_proto(struct sk_buff *skb)
738{
739 return (ip_hdr(skb)->version == 4) ?
740 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
741}
742
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530743static inline bool be_is_txq_full(struct be_tx_obj *txo)
744{
745 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
746}
747
748static inline bool be_can_txq_wake(struct be_tx_obj *txo)
749{
750 return atomic_read(&txo->q.used) < txo->q.len / 2;
751}
752
753static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
754{
755 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
756}
757
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530758static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
759 struct sk_buff *skb,
760 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700761{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530762 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700763
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000764 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530765 BE_WRB_F_SET(wrb_params->features, LSO, 1);
766 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000767 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530768 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700769 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530770 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530771 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530772 proto = skb_inner_ip_proto(skb);
773 } else {
774 proto = skb_ip_proto(skb);
775 }
776 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530777 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530778 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530779 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700780 }
781
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100782 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530783 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
784 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700785 }
786
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530787 BE_WRB_F_SET(wrb_params->features, CRC, 1);
788}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500789
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530790static void wrb_fill_hdr(struct be_adapter *adapter,
791 struct be_eth_hdr_wrb *hdr,
792 struct be_wrb_params *wrb_params,
793 struct sk_buff *skb)
794{
795 memset(hdr, 0, sizeof(*hdr));
796
797 SET_TX_WRB_HDR_BITS(crc, hdr,
798 BE_WRB_F_GET(wrb_params->features, CRC));
799 SET_TX_WRB_HDR_BITS(ipcs, hdr,
800 BE_WRB_F_GET(wrb_params->features, IPCS));
801 SET_TX_WRB_HDR_BITS(tcpcs, hdr,
802 BE_WRB_F_GET(wrb_params->features, TCPCS));
803 SET_TX_WRB_HDR_BITS(udpcs, hdr,
804 BE_WRB_F_GET(wrb_params->features, UDPCS));
805
806 SET_TX_WRB_HDR_BITS(lso, hdr,
807 BE_WRB_F_GET(wrb_params->features, LSO));
808 SET_TX_WRB_HDR_BITS(lso6, hdr,
809 BE_WRB_F_GET(wrb_params->features, LSO6));
810 SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);
811
812 /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
813 * hack is not needed, the evt bit is set while ringing DB.
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500814 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530815 SET_TX_WRB_HDR_BITS(event, hdr,
816 BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
817 SET_TX_WRB_HDR_BITS(vlan, hdr,
818 BE_WRB_F_GET(wrb_params->features, VLAN));
819 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
820
821 SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
822 SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
Venkata Duvvuru760c2952015-05-13 13:00:14 +0530823 SET_TX_WRB_HDR_BITS(mgmt, hdr,
824 BE_WRB_F_GET(wrb_params->features, OS2BMC));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700825}
826
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000827static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530828 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000829{
830 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500831 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000832
Sathya Perla7101e112010-03-22 20:41:12 +0000833
Sathya Perlaf986afc2015-02-06 08:18:43 -0500834 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
835 (u64)le32_to_cpu(wrb->frag_pa_lo);
836 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000837 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500838 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000839 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500840 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000841 }
842}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700843
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530844/* Grab a WRB header for xmit */
845static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700846{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530847 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700848
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530849 queue_head_inc(&txo->q);
850 return head;
851}
852
/* Set up the WRB header for xmit: fill the header WRB at slot @head
 * (reserved earlier by be_tx_get_wrb_hdr()), remember the skb for the
 * completion path, and account all of the request's WRBs as used.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* HW expects the header in little-endian dwords */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* the slot must have been released by a previous completion */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	/* remembered so be_xmit_flush() can patch num_wrb if it needs
	 * to append a dummy WRB
	 */
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700873
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530874/* Setup a WRB fragment (buffer descriptor) for xmit */
875static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
876 int len)
877{
878 struct be_eth_wrb *wrb;
879 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700880
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530881 wrb = queue_head_node(txq);
882 wrb_fill(wrb, busaddr, len);
883 queue_head_inc(txq);
884}
885
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind the producer to the header WRB so the walk below
	 * re-visits this request's fragment WRBs
	 */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	/* walk forward over the fragment WRBs, unmapping each; only the
	 * first fragment may have been mapped with dma_map_single()
	 */
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* leave the producer at the header slot: the whole request is
	 * discarded as if it was never enqueued
	 */
	txq->head = head;
}
913
914/* Enqueue the given packet for transmit. This routine allocates WRBs for the
915 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
916 * of WRBs used up by the packet.
917 */
918static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
919 struct sk_buff *skb,
920 struct be_wrb_params *wrb_params)
921{
922 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
923 struct device *dev = &adapter->pdev->dev;
924 struct be_queue_info *txq = &txo->q;
925 bool map_single = false;
926 u16 head = txq->head;
927 dma_addr_t busaddr;
928 int len;
929
930 head = be_tx_get_wrb_hdr(txo);
931
932 if (skb->len > skb->data_len) {
933 len = skb_headlen(skb);
934
935 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
936 if (dma_mapping_error(dev, busaddr))
937 goto dma_err;
938 map_single = true;
939 be_tx_setup_wrb_frag(txo, busaddr, len);
940 copied += len;
941 }
942
943 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
944 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
945 len = skb_frag_size(frag);
946
947 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
948 if (dma_mapping_error(dev, busaddr))
949 goto dma_err;
950 be_tx_setup_wrb_frag(txo, busaddr, len);
951 copied += len;
952 }
953
954 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
955
956 be_tx_stats_update(txo, skb);
957 return wrb_cnt;
958
959dma_err:
960 adapter->drv_stats.dma_map_errors++;
961 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000962 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700963}
964
Sathya Perlaf7062ee2015-02-06 08:18:35 -0500965static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
966{
967 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
968}
969
Somnath Kotur93040ae2012-06-26 22:32:10 +0000970static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000971 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530972 struct be_wrb_params
973 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000974{
975 u16 vlan_tag = 0;
976
977 skb = skb_share_check(skb, GFP_ATOMIC);
978 if (unlikely(!skb))
979 return skb;
980
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100981 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000982 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530983
984 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
985 if (!vlan_tag)
986 vlan_tag = adapter->pvid;
987 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
988 * skip VLAN insertion
989 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530990 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530991 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000992
993 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +0100994 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
995 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000996 if (unlikely(!skb))
997 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000998 skb->vlan_tci = 0;
999 }
1000
1001 /* Insert the outer VLAN, if any */
1002 if (adapter->qnq_vid) {
1003 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001004 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1005 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001006 if (unlikely(!skb))
1007 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301008 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001009 }
1010
Somnath Kotur93040ae2012-06-26 22:32:10 +00001011 return skb;
1012}
1013
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001014static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1015{
1016 struct ethhdr *eh = (struct ethhdr *)skb->data;
1017 u16 offset = ETH_HLEN;
1018
1019 if (eh->h_proto == htons(ETH_P_IPV6)) {
1020 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1021
1022 offset += sizeof(struct ipv6hdr);
1023 if (ip6h->nexthdr != NEXTHDR_TCP &&
1024 ip6h->nexthdr != NEXTHDR_UDP) {
1025 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301026 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001027
1028 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1029 if (ehdr->hdrlen == 0xff)
1030 return true;
1031 }
1032 }
1033 return false;
1034}
1035
1036static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1037{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001038 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001039}
1040
Sathya Perla748b5392014-05-09 13:29:13 +05301041static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001042{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001043 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001044}
1045
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301046static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
1047 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301048 struct be_wrb_params
1049 *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001050{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001051 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001052 unsigned int eth_hdr_len;
1053 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +00001054
Ajit Khaparde1297f9d2013-04-24 11:52:28 +00001055 /* For padded packets, BE HW modifies tot_len field in IP header
1056 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001057 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001058 */
Sathya Perlaee9c7992013-05-22 23:04:55 +00001059 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
1060 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001061 if (skb->len <= 60 &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001062 (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +00001063 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +00001064 ip = (struct iphdr *)ip_hdr(skb);
1065 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
1066 }
1067
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001068 /* If vlan tag is already inlined in the packet, skip HW VLAN
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301069 * tagging in pvid-tagging mode
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001070 */
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301071 if (be_pvid_tagging_enabled(adapter) &&
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001072 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301073 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001074
Somnath Kotur93040ae2012-06-26 22:32:10 +00001075 /* HW has a bug wherein it will calculate CSUM for VLAN
1076 * pkts even though it is disabled.
1077 * Manually insert VLAN in pkt.
1078 */
1079 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001080 skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301081 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001082 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301083 goto err;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001084 }
1085
1086 /* HW may lockup when VLAN HW tagging is requested on
1087 * certain ipv6 packets. Drop such pkts if the HW workaround to
1088 * skip HW tagging is not enabled by FW.
1089 */
1090 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Kalesh APcd3307aa2014-09-19 15:47:02 +05301091 (adapter->pvid || adapter->qnq_vid) &&
1092 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001093 goto tx_drop;
1094
1095 /* Manual VLAN tag insertion to prevent:
1096 * ASIC lockup when the ASIC inserts VLAN tag into
1097 * certain ipv6 packets. Insert VLAN tags in driver,
1098 * and set event, completion, vlan bits accordingly
1099 * in the Tx WRB.
1100 */
1101 if (be_ipv6_tx_stall_chk(adapter, skb) &&
1102 be_vlan_tag_tx_chk(adapter, skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301103 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001104 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301105 goto err;
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001106 }
1107
Sathya Perlaee9c7992013-05-22 23:04:55 +00001108 return skb;
1109tx_drop:
1110 dev_kfree_skb_any(skb);
Vasundhara Volamc9128952014-03-03 14:25:07 +05301111err:
Sathya Perlaee9c7992013-05-22 23:04:55 +00001112 return NULL;
1113}
1114
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301115static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1116 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301117 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301118{
1119 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1120 * less may cause a transmit stall on that port. So the work-around is
1121 * to pad short packets (<= 32 bytes) to a 36-byte length.
1122 */
1123 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001124 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301125 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301126 }
1127
1128 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301129 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301130 if (!skb)
1131 return NULL;
1132 }
1133
1134 return skb;
1135}
1136
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001137static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1138{
1139 struct be_queue_info *txq = &txo->q;
1140 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1141
1142 /* Mark the last request eventable if it hasn't been marked already */
1143 if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1144 hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1145
1146 /* compose a dummy wrb if there are odd set of wrbs to notify */
1147 if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
Sathya Perlaf986afc2015-02-06 08:18:43 -05001148 wrb_fill_dummy(queue_head_node(txq));
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001149 queue_head_inc(txq);
1150 atomic_inc(&txq->used);
1151 txo->pend_wrb_cnt++;
1152 hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1153 TX_HDR_WRB_NUM_SHIFT);
1154 hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1155 TX_HDR_WRB_NUM_SHIFT);
1156 }
1157 be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1158 txo->pend_wrb_cnt = 0;
1159}
1160
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301161/* OS2BMC related */
1162
1163#define DHCP_CLIENT_PORT 68
1164#define DHCP_SERVER_PORT 67
1165#define NET_BIOS_PORT1 137
1166#define NET_BIOS_PORT2 138
1167#define DHCPV6_RAS_PORT 547
1168
1169#define is_mc_allowed_on_bmc(adapter, eh) \
1170 (!is_multicast_filt_enabled(adapter) && \
1171 is_multicast_ether_addr(eh->h_dest) && \
1172 !is_broadcast_ether_addr(eh->h_dest))
1173
1174#define is_bc_allowed_on_bmc(adapter, eh) \
1175 (!is_broadcast_filt_enabled(adapter) && \
1176 is_broadcast_ether_addr(eh->h_dest))
1177
1178#define is_arp_allowed_on_bmc(adapter, skb) \
1179 (is_arp(skb) && is_arp_filt_enabled(adapter))
1180
1181#define is_broadcast_packet(eh, adapter) \
1182 (is_multicast_ether_addr(eh->h_dest) && \
1183 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
1184
1185#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
1186
1187#define is_arp_filt_enabled(adapter) \
1188 (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
1189
1190#define is_dhcp_client_filt_enabled(adapter) \
1191 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
1192
1193#define is_dhcp_srvr_filt_enabled(adapter) \
1194 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
1195
1196#define is_nbios_filt_enabled(adapter) \
1197 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
1198
1199#define is_ipv6_na_filt_enabled(adapter) \
1200 (adapter->bmc_filt_mask & \
1201 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
1202
1203#define is_ipv6_ra_filt_enabled(adapter) \
1204 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
1205
1206#define is_ipv6_ras_filt_enabled(adapter) \
1207 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
1208
1209#define is_broadcast_filt_enabled(adapter) \
1210 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
1211
1212#define is_multicast_filt_enabled(adapter) \
1213 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1214
/* Decide whether @skb must also be delivered to the BMC, per the FW
 * filter mask. Only multicast/broadcast frames are candidates.
 * May replace *skb: if the packet goes to the BMC and carries a VLAN
 * tag, the tag is inserted inline (the BMC expects it in the data).
 * Returns true if the caller should enqueue a second, mgmt-flagged
 * copy of the packet.
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	/* unicast traffic never goes to the BMC */
	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* IPv6 neighbour-discovery messages (RA/NA) */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6-RAS over UDP, keyed by dest port */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1284
Sathya Perlaee9c7992013-05-22 23:04:55 +00001285static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1286{
1287 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001288 u16 q_idx = skb_get_queue_mapping(skb);
1289 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301290 struct be_wrb_params wrb_params = { 0 };
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301291 bool flush = !skb->xmit_more;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001292 u16 wrb_cnt;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001293
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301294 skb = be_xmit_workarounds(adapter, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001295 if (unlikely(!skb))
1296 goto drop;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001297
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301298 be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1299
1300 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001301 if (unlikely(!wrb_cnt)) {
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001302 dev_kfree_skb_any(skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001303 goto drop;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001304 }
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001305
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301306 /* if os2bmc is enabled and if the pkt is destined to bmc,
1307 * enqueue the pkt a 2nd time with mgmt bit set.
1308 */
1309 if (be_send_pkt_to_bmc(adapter, &skb)) {
1310 BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
1311 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1312 if (unlikely(!wrb_cnt))
1313 goto drop;
1314 else
1315 skb_get(skb);
1316 }
1317
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05301318 if (be_is_txq_full(txo)) {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001319 netif_stop_subqueue(netdev, q_idx);
1320 tx_stats(txo)->tx_stops++;
1321 }
1322
1323 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1324 be_xmit_flush(adapter, txo);
1325
1326 return NETDEV_TX_OK;
1327drop:
1328 tx_stats(txo)->tx_drv_drops++;
1329 /* Flush the already enqueued tx requests */
1330 if (flush && txo->pend_wrb_cnt)
1331 be_xmit_flush(adapter, txo);
1332
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001333 return NETDEV_TX_OK;
1334}
1335
1336static int be_change_mtu(struct net_device *netdev, int new_mtu)
1337{
1338 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301339 struct device *dev = &adapter->pdev->dev;
1340
1341 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1342 dev_info(dev, "MTU must be between %d and %d bytes\n",
1343 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001344 return -EINVAL;
1345 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301346
1347 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301348 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001349 netdev->mtu = new_mtu;
1350 return 0;
1351}
1352
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001353static inline bool be_in_all_promisc(struct be_adapter *adapter)
1354{
1355 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1356 BE_IF_FLAGS_ALL_PROMISCUOUS;
1357}
1358
1359static int be_set_vlan_promisc(struct be_adapter *adapter)
1360{
1361 struct device *dev = &adapter->pdev->dev;
1362 int status;
1363
1364 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1365 return 0;
1366
1367 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1368 if (!status) {
1369 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1370 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1371 } else {
1372 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1373 }
1374 return status;
1375}
1376
1377static int be_clear_vlan_promisc(struct be_adapter *adapter)
1378{
1379 struct device *dev = &adapter->pdev->dev;
1380 int status;
1381
1382 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1383 if (!status) {
1384 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1385 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1386 }
1387 return status;
1388}
1389
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	/* Too many VLANs for HW filtering: fall back to VLAN promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* Filters programmed OK: promisc mode no longer needed */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1425
Patrick McHardy80d5c362013-04-19 02:04:28 +00001426static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001427{
1428 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001429 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001430
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001431 /* Packets with VID 0 are always received by Lancer by default */
1432 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301433 return status;
1434
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301435 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301436 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001437
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301438 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301439 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001440
Somnath Kotura6b74e02014-01-21 15:50:55 +05301441 status = be_vid_config(adapter);
1442 if (status) {
1443 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301444 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301445 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301446
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001447 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001448}
1449
Patrick McHardy80d5c362013-04-19 02:04:28 +00001450static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001451{
1452 struct be_adapter *adapter = netdev_priv(netdev);
1453
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001454 /* Packets with VID 0 are always received by Lancer by default */
1455 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301456 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001457
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301458 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301459 adapter->vlans_added--;
1460
1461 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001462}
1463
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001464static void be_clear_all_promisc(struct be_adapter *adapter)
Somnath kotur7ad09452014-03-03 14:24:43 +05301465{
Sathya Perlaac34b742015-02-06 08:18:40 -05001466 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001467 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1468}
1469
1470static void be_set_all_promisc(struct be_adapter *adapter)
1471{
1472 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1473 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1474}
1475
1476static void be_set_mc_promisc(struct be_adapter *adapter)
1477{
1478 int status;
1479
1480 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1481 return;
1482
1483 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1484 if (!status)
1485 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1486}
1487
1488static void be_set_mc_list(struct be_adapter *adapter)
1489{
1490 int status;
1491
1492 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1493 if (!status)
1494 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1495 else
1496 be_set_mc_promisc(adapter);
1497}
1498
/* Re-programs the netdev's secondary unicast MACs into the HW pmac
 * table. Slot 0 of pmac_id is reserved for the primary MAC; secondary
 * MACs occupy slots 1..uc_macs. Falls back to full promiscuous mode
 * when the list exceeds the HW limit.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Delete every previously-added secondary MAC; this loop also
	 * resets adapter->uc_macs to 0 as a side effect.
	 */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Re-add each address; uc_macs is bumped first so the new MAC
	 * lands in slot 1.., skipping slot 0.
	 */
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1519
1520static void be_clear_uc_list(struct be_adapter *adapter)
1521{
1522 int i;
1523
1524 for (i = 1; i < (adapter->uc_macs + 1); i++)
1525 be_cmd_pmac_del(adapter, adapter->if_handle,
1526 adapter->pmac_id[i], 0);
1527 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301528}
1529
/* ndo_set_rx_mode handler: reconciles the HW RX filters with the
 * netdev flags (promisc/allmulti) and the configured UC/MC lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		/* Restore the VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Re-program UC MACs only when the list size has changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1558
/* ndo_set_vf_mac handler: programs @mac as VF @vf's MAC address.
 * Returns -EPERM without SR-IOV, -EINVAL for a bad MAC or VF index.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BEx: replace the pmac entry (delete old, add new).
		 * The delete's status is intentionally ignored.
		 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		/* Non-BEx chips use the dedicated SET_MAC command */
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the newly-active MAC only after HW accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1598
/* ndo_get_vf_config handler: fills @vi from the driver's cached
 * per-VF configuration (no HW access).
 */
static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;	/* min rate is not supported by this driver */
	/* vlan_tag packs VID in the low bits and priority above it */
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
	vi->spoofchk = adapter->vf_cfg[vf].spoofchk;

	return 0;
}
1622
/* Enables Transparent VLAN Tagging (TVT) with @vlan on VF @vf.
 * Returns non-zero only when enabling TVT itself fails; failures of
 * the follow-up cleanup steps are logged but deliberately not fatal.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1651
/* Disables Transparent VLAN Tagging on VF @vf and restores the VF's
 * ability to program its own VLAN filters. Returns non-zero only when
 * the TVT reset itself fails; the privilege restore is best-effort.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1678
/* ndo_set_vf_vlan handler: a non-zero vlan/qos enables transparent
 * VLAN tagging on the VF; vlan == 0 && qos == 0 disables it.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	/* VID is 12 bits (<= 4095), priority is 3 bits (<= 7) */
	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* Pack priority into the tag above the VID bits */
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	/* Cache the tag only after HW accepted the change */
	vf_cfg->vlan_tag = vlan;
	return 0;
}
1708
/* ndo_set_vf_rate handler: programs @max_tx_rate (Mbps) as the TX rate
 * limit for VF @vf. min_tx_rate is not supported and must be 0.
 * max_tx_rate == 0 removes the limit.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* Minimum-rate guarantees are not supported by this HW */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate of 0 clears the limit; no link-speed validation needed */
	if (!max_tx_rate)
		goto config_qos;

	/* The requested rate must be validated against the current
	 * link speed, so the link must be up.
	 */
	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301770
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301771static int be_set_vf_link_state(struct net_device *netdev, int vf,
1772 int link_state)
1773{
1774 struct be_adapter *adapter = netdev_priv(netdev);
1775 int status;
1776
1777 if (!sriov_enabled(adapter))
1778 return -EPERM;
1779
1780 if (vf >= adapter->num_vfs)
1781 return -EINVAL;
1782
1783 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301784 if (status) {
1785 dev_err(&adapter->pdev->dev,
1786 "Link state change on VF %d failed: %#x\n", vf, status);
1787 return be_cmd_status(status);
1788 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301789
Kalesh APabccf232014-07-17 16:20:24 +05301790 adapter->vf_cfg[vf].plink_tracking = link_state;
1791
1792 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301793}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001794
Kalesh APe7bcbd72015-05-06 05:30:32 -04001795static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1796{
1797 struct be_adapter *adapter = netdev_priv(netdev);
1798 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1799 u8 spoofchk;
1800 int status;
1801
1802 if (!sriov_enabled(adapter))
1803 return -EPERM;
1804
1805 if (vf >= adapter->num_vfs)
1806 return -EINVAL;
1807
1808 if (BEx_chip(adapter))
1809 return -EOPNOTSUPP;
1810
1811 if (enable == vf_cfg->spoofchk)
1812 return 0;
1813
1814 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1815
1816 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1817 0, spoofchk);
1818 if (status) {
1819 dev_err(&adapter->pdev->dev,
1820 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1821 return be_cmd_status(status);
1822 }
1823
1824 vf_cfg->spoofchk = enable;
1825 return 0;
1826}
1827
Sathya Perla2632baf2013-10-01 16:00:00 +05301828static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1829 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001830{
Sathya Perla2632baf2013-10-01 16:00:00 +05301831 aic->rx_pkts_prev = rx_pkts;
1832 aic->tx_reqs_prev = tx_pkts;
1833 aic->jiffies = now;
1834}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001835
/* Computes a new event-queue delay (interrupt coalescing value) for
 * @eqo from the packet rate observed since the last sample.
 * Returns the static et_eqd when adaptive coalescing is disabled.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		/* AIC off: reset the sampling baseline and use the
		 * ethtool-configured static value.
		 */
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum pkt counts of all RX/TX queues on this EQ; the
	 * u64_stats retry loops give a consistent 64-bit read.
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	/* Packets per second across both directions */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	/* Very low rates get no coalescing; clamp to the AIC bounds */
	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
1896
1897/* For Skyhawk-R only */
1898static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1899{
1900 struct be_adapter *adapter = eqo->adapter;
1901 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1902 ulong now = jiffies;
1903 int eqd;
1904 u32 mult_enc;
1905
1906 if (!aic->enable)
1907 return 0;
1908
1909 if (time_before_eq(now, aic->jiffies) ||
1910 jiffies_to_msecs(now - aic->jiffies) < 1)
1911 eqd = aic->prev_eqd;
1912 else
1913 eqd = be_get_new_eqd(eqo);
1914
1915 if (eqd > 100)
1916 mult_enc = R2I_DLY_ENC_1;
1917 else if (eqd > 60)
1918 mult_enc = R2I_DLY_ENC_2;
1919 else if (eqd > 20)
1920 mult_enc = R2I_DLY_ENC_3;
1921 else
1922 mult_enc = R2I_DLY_ENC_0;
1923
1924 aic->prev_eqd = eqd;
1925
1926 return mult_enc;
1927}
1928
/* Recomputes the adaptive EQ delay for every event queue and pushes
 * the changed values to FW in a single MODIFY_EQD command.
 * @force_update: program even unchanged values.
 */
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			/* HW takes a multiplier, not the raw delay value */
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* Batch all changed EQs into one FW command */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1950
/* Accounts one RX completion in the per-queue stats. The begin/end
 * pair marks a writer section so 64-bit readers see consistent values.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1966
Sathya Perla2e588f82011-03-11 02:49:26 +00001967static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001968{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001969 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301970 * Also ignore ipcksm for ipv6 pkts
1971 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001972 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301973 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001974}
1975
/* Pops one RX frag descriptor from the tail of the RX queue and returns
 * its page_info. For the last frag of a page the whole page is DMA
 * unmapped; otherwise only this frag is synced for CPU access.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* Consume the descriptor: advance tail, drop the used count */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2001
2002/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002003static void be_rx_compl_discard(struct be_rx_obj *rxo,
2004 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002005{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002006 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002007 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002008
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002009 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302010 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002011 put_page(page_info->page);
2012 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002013 }
2014}
2015
2016/*
2017 * skb_fill_rx_data forms a complete skb for an ether frame
2018 * indicated by rxcp.
2019 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	/* First posted rx frag of this completion */
	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header into the linear area;
		 * the rest of the first frag is attached as a page frag.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page (or its reference) moved to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: the existing frag slot
			 * already holds a reference, so drop this extra one.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2090
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb memory: count the drop and return the posted
		 * rx frags of this completion back to the queue.
		 */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only if the netdev has RXCSUM enabled */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2126
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb from the napi cache: drop this completion's frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* NOTE: j is u16, so the initial -1 wraps to 0xffff and the first
	 * j++ below brings it to frag slot 0.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Existing frag slot already references this page */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2184
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002185static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2186 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002187{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302188 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2189 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2190 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2191 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2192 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2193 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2194 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2195 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2196 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2197 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2198 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002199 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302200 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2201 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002202 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302203 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302204 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302205 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002206}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002207
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002208static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2209 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002210{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302211 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2212 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2213 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2214 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2215 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2216 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2217 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2218 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2219 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2220 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2221 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002222 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302223 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2224 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002225 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302226 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2227 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002228}
2229
/* Return the next valid rx completion from the CQ, or NULL when none is
 * pending.  The returned rxcp points at rxo->rxcp, which is overwritten
 * on every call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not valid for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag unless it was explicitly configured
		 * by the host (i.e. present in adapter->vids).
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2274
Eric Dumazet1829b082011-03-01 05:48:12 +00002275static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002276{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002277 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002278
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002279 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002280 gfp |= __GFP_COMP;
2281 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002282}
2283
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Stop early if the next slot still holds an unconsumed page */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new "big page" and DMA-map it once;
			 * subsequent frags reuse the same mapping.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag of the current big page: take an
			 * extra page reference for it.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the rx descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			/* Page is full: mark last_frag so unmap happens when
			 * this frag is consumed; store the page base address.
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in chunks of at most
		 * MAX_NUM_POST_ERX_DB frags per notification.
		 */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2366
/* Return the next valid tx completion from the CQ, or NULL when none is
 * pending.  The returned txcp points at txo->txcp, overwritten per call.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Clear the valid bit so this entry is not parsed again */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2387
/* Walk the TXQ from its tail up to @last_index, unmapping every WRB and
 * freeing the skbs that were completed.  Returns the number of WRBs
 * processed so the caller can decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		/* A non-NULL sent_skbs entry marks the hdr wrb of a request */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first frag wrb after a hdr wrb maps the skb's linear
		 * header area; unmap it only when that area is non-empty.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free skb of the last processed request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2421
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002422/* Return the number of events in the event queue */
2423static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002424{
2425 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002426 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002427
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002428 do {
2429 eqe = queue_tail_node(&eqo->q);
2430 if (eqe->evt == 0)
2431 break;
2432
2433 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002434 eqe->evt = 0;
2435 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002436 queue_tail_inc(&eqo->q);
2437 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002438
2439 return num;
2440}
2441
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002442/* Leaves the EQ is disarmed state */
2443static void be_eq_clean(struct be_eq_obj *eqo)
2444{
2445 int num = events_get(eqo);
2446
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002447 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002448}
2449
/* Drain the rx CQ and free all posted-but-unused rx buffers; used during
 * queue teardown.  Leaves both the CQ (unarmed) and the RXQ empty.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms or on a detected HW error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* num_rcvd == 0 identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2501
/* Drain tx completions on all TXQs and then clean up any requests that were
 * queued but never notified to / completed by the HW; used during teardown.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made: restart the 10ms clock */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2567
/* Tear down all event queues: drain and destroy each created EQ, remove its
 * napi context, and free the associated memory and cpumask.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Drain pending events before destroying the EQ */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		/* The cpumask was allocated even for EQs never created */
		free_cpumask_var(eqo->affinity_mask);
		be_queue_free(adapter, &eqo->q);
	}
}
2584
/* Create the event queues: one EQ + napi context per interrupt vector,
 * capped by the configured queue count.  Returns 0 on success or a
 * negative errno; partially-created queues are left for the destroy path.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Spread EQ affinity over CPUs local to the device's node */
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Adaptive interrupt coalescing defaults */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2622
Sathya Perla5fb379e2009-06-18 00:02:59 +00002623static void be_mcc_queues_destroy(struct be_adapter *adapter)
2624{
2625 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002626
Sathya Perla8788fdc2009-07-27 22:52:03 +00002627 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002628 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002629 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002630 be_queue_free(adapter, q);
2631
Sathya Perla8788fdc2009-07-27 22:52:03 +00002632 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002633 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002634 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002635 be_queue_free(adapter, q);
2636}
2637
2638/* Must be called only after TX qs are created as MCC shares TX EQ */
2639static int be_mcc_queues_create(struct be_adapter *adapter)
2640{
2641 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002642
Sathya Perla8788fdc2009-07-27 22:52:03 +00002643 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002644 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302645 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002646 goto err;
2647
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002648 /* Use the default EQ for MCC completions */
2649 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002650 goto mcc_cq_free;
2651
Sathya Perla8788fdc2009-07-27 22:52:03 +00002652 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002653 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2654 goto mcc_cq_destroy;
2655
Sathya Perla8788fdc2009-07-27 22:52:03 +00002656 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002657 goto mcc_q_free;
2658
2659 return 0;
2660
2661mcc_q_free:
2662 be_queue_free(adapter, q);
2663mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002664 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002665mcc_cq_free:
2666 be_queue_free(adapter, cq);
2667err:
2668 return -1;
2669}
2670
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002671static void be_tx_queues_destroy(struct be_adapter *adapter)
2672{
2673 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002674 struct be_tx_obj *txo;
2675 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002676
Sathya Perla3c8def92011-06-12 20:01:58 +00002677 for_all_tx_queues(adapter, txo, i) {
2678 q = &txo->q;
2679 if (q->created)
2680 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2681 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002682
Sathya Perla3c8def92011-06-12 20:01:58 +00002683 q = &txo->cq;
2684 if (q->created)
2685 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2686 be_queue_free(adapter, q);
2687 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002688}
2689
Sathya Perla77071332013-08-27 16:57:34 +05302690static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002691{
Sathya Perla73f394e2015-03-26 03:05:09 -04002692 struct be_queue_info *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002693 struct be_tx_obj *txo;
Sathya Perla73f394e2015-03-26 03:05:09 -04002694 struct be_eq_obj *eqo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302695 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002696
Sathya Perla92bf14a2013-08-27 16:57:32 +05302697 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002698
Sathya Perla3c8def92011-06-12 20:01:58 +00002699 for_all_tx_queues(adapter, txo, i) {
2700 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002701 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2702 sizeof(struct be_eth_tx_compl));
2703 if (status)
2704 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002705
John Stultz827da442013-10-07 15:51:58 -07002706 u64_stats_init(&txo->stats.sync);
2707 u64_stats_init(&txo->stats.sync_compl);
2708
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002709 /* If num_evt_qs is less than num_tx_qs, then more than
2710 * one txq share an eq
2711 */
Sathya Perla73f394e2015-03-26 03:05:09 -04002712 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2713 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002714 if (status)
2715 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002716
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002717 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2718 sizeof(struct be_eth_wrb));
2719 if (status)
2720 return status;
2721
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002722 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002723 if (status)
2724 return status;
Sathya Perla73f394e2015-03-26 03:05:09 -04002725
2726 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2727 eqo->idx);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002728 }
2729
Sathya Perlad3791422012-09-28 04:39:44 +00002730 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2731 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002732 return 0;
2733}
2734
2735static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002736{
2737 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002738 struct be_rx_obj *rxo;
2739 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002740
Sathya Perla3abcded2010-10-03 22:12:27 -07002741 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002742 q = &rxo->cq;
2743 if (q->created)
2744 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2745 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002746 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002747}
2748
/* Decide the RX queue layout (RSS rings + optional default RXQ) and create
 * one completion queue per RX queue, distributing CQs round-robin over the
 * available EQs. Returns 0 on success or the failing sub-call's status;
 * partially created CQs are cleaned up by be_rx_cqs_destroy().
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	/* Size of the largest contiguous buffer chunk posted to the RXQs */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* Seed the u64 stats seqcount before first use */
		u64_stats_init(&rxo->stats.sync);
		/* Spread RX CQs round-robin across the event queues */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2790
/* Legacy INTx interrupt handler. Counts pending EQ entries, schedules NAPI,
 * and acks the EQ; spurious interrupts (no events) are tolerated once per
 * valid interrupt before IRQ_NONE is returned.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack the consumed events without re-arming (NAPI will re-arm) */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2822
/* MSI-X interrupt handler: one vector per EQ. Just ack the EQ without
 * re-arming it and hand processing off to NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2831
Sathya Perla2e588f82011-03-11 02:49:26 +00002832static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002833{
Somnath Koture38b1702013-05-29 22:55:56 +00002834 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002835}
2836
/* Reap up to @budget RX completions from @rxo's CQ, deliver packets via GRO
 * or the regular path, and replenish the RX ring when it runs low.
 * @polling distinguishes NAPI polling from busy-polling (no GRO in the
 * latter). Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;	/* RX frags freed; used to size the refill */

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews (SKUs)
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Ack the processed completions and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2896
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302897static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302898{
2899 switch (status) {
2900 case BE_TX_COMP_HDR_PARSE_ERR:
2901 tx_stats(txo)->tx_hdr_parse_err++;
2902 break;
2903 case BE_TX_COMP_NDMA_ERR:
2904 tx_stats(txo)->tx_dma_err++;
2905 break;
2906 case BE_TX_COMP_ACL_ERR:
2907 tx_stats(txo)->tx_spoof_check_err++;
2908 break;
2909 }
2910}
2911
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302912static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302913{
2914 switch (status) {
2915 case LANCER_TX_COMP_LSO_ERR:
2916 tx_stats(txo)->tx_tso_err++;
2917 break;
2918 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2919 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2920 tx_stats(txo)->tx_spoof_check_err++;
2921 break;
2922 case LANCER_TX_COMP_QINQ_ERR:
2923 tx_stats(txo)->tx_qinq_err++;
2924 break;
2925 case LANCER_TX_COMP_PARITY_ERR:
2926 tx_stats(txo)->tx_internal_parity_err++;
2927 break;
2928 case LANCER_TX_COMP_DMA_ERR:
2929 tx_stats(txo)->tx_dma_err++;
2930 break;
2931 }
2932}
2933
/* Drain all pending TX completions for @txo (queue index @idx): free the
 * completed wrbs, record per-status error stats, ack/re-arm the CQ, and wake
 * the netdev subqueue if it was stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		/* Non-zero status: account the error per chip family */
		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002968
#ifdef CONFIG_NET_RX_BUSY_POLL
/* NAPI/busy-poll arbitration for an EQ: exactly one of the NAPI poller or a
 * busy-polling socket may process an EQ's RX queues at a time. The eqo->lock
 * spinlock protects eqo->state; the *_YIELD bits record that the other side
 * tried and backed off.
 */

/* Try to claim the EQ for NAPI processing; returns false if a busy-poller
 * holds it (the NAPI side then just consumes its budget and retries later).
 * Caller runs in softirq context, hence plain spin_lock.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ after NAPI processing */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-polling; returns false if NAPI holds it.
 * Runs in process context, hence the _bh lock variant.
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ after busy-polling */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the per-EQ arbitration state (called before enabling NAPI) */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until no busy-poller is active on this EQ, leaving it locked for
 * NAPI so busy-polling cannot restart (used on the teardown path).
 */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Without busy-poll support the arbitration collapses: NAPI always wins
 * and the busy-poll entry points are inert stubs.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3068
/* NAPI poll handler for one EQ: reap TX completions, then RX (if the EQ is
 * not held by a busy-poller), process MCC on the MCC EQ, and finally either
 * complete NAPI and re-arm the EQ (work < budget) or just ack the events and
 * stay in polling mode. Returns the RX work done (capped at @budget).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	/* Count EQ entries now; they are acked in the be_eq_notify below */
	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* A busy-poller owns the RX queues; report a full budget so
		 * NAPI keeps polling and we retry on the next pass
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3117
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Socket busy-poll entry point: reap a small batch (up to 4) of RX
 * completions from the first of this EQ's RX queues that has work.
 * Returns LL_FLUSH_BUSY if NAPI currently owns the EQ, else the number of
 * packets processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
3139
/* Probe the adapter's error registers and latch an unrecoverable-error (UE)
 * state when one is found. Lancer reports errors via SLIPORT registers in
 * BAR space; other chips expose masked UE status words in PCI config space.
 * A no-op once a HW error has already been recorded.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	struct device *dev = &adapter->pdev->dev;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* Ignore UE bits that the FW has masked out */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				be_set_error(adapter, BE_ERROR_UE);

			/* Log the name of every set UE bit */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}
3208
Sathya Perla8d56ff12009-11-22 22:02:26 +00003209static void be_msix_disable(struct be_adapter *adapter)
3210{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003211 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003212 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003213 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303214 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003215 }
3216}
3217
/* Enable MSI-X vectors. Requests up to the NIC (+RoCE, when supported)
 * budget and accepts anything >= MIN_MSIX_VECTORS; when RoCE shares the
 * allocation, half the granted vectors are reserved for it. Returns 0 on
 * success or (for VFs only, which cannot fall back to INTx) the negative
 * pci_enable_msix_range() error.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant fewer vectors than requested, down to MIN_MSIX_VECTORS */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3261
/* Return the Linux IRQ vector assigned to @eqo's MSI-X table entry */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
3267
/* Request one MSI-X IRQ per event queue (named "<ifname>-q<N>") and set its
 * CPU affinity hint. On failure, frees every IRQ requested so far and
 * disables MSI-X. Returns 0 or the failing request_irq() status.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* Walk back over the EQs whose IRQs were already requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3293
/* Register the adapter's interrupt handler(s): MSI-X when enabled, falling
 * back to a shared INTx line on the first EQ otherwise. VFs must not fall
 * back (INTx is unsupported there), so an MSI-X failure is returned as-is.
 * Sets isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3321
/* Undo be_irq_register(): free the INTx line, or each per-EQ MSI-X vector
 * (clearing its affinity hint first). A no-op if no ISR was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}
3347
/* Destroy all RX rings: for each ring that was created in FW, issue the
 * FW destroy command and drain its completion queue (freeing posted RX
 * buffers), then release the ring memory unconditionally.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
3363
/* ndo_stop() handler: quiesce and tear down the data path.
 *
 * Teardown order is deliberate: RoCE first, then NAPI/busy-poll, async
 * MCC events, TX drain, RX ring destroy, and finally EQ cleanup — each
 * EQ is cleaned only after its IRQ has been synchronized so no handler
 * can still be running.  Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	be_clear_uc_list(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3409
/* Allocate and create all RX rings in FW, program the RSS indirection
 * table and hash key, and post initial RX buffers.
 *
 * A non-RSS default RXQ is created only when needed; the remaining
 * queues are RSS-capable.  On any FW-command failure the partially
 * built state is left for the caller (be_open() error path calls
 * be_close(), which destroys the rings).
 *
 * Returns 0 on success or a negative errno/FW status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Create the non-RSS default RXQ only if required */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by striping the RSS queue ids
		 * across all RSS_INDIR_TABLE_LEN entries
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on Lancer/Skyhawk (non-BEx) */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Remember the key actually programmed, for ethtool reporting */
	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3479
/* ndo_open() handler: bring up the data path.
 *
 * Creates RX rings, registers IRQs, arms RX/TX CQs and EQs, enables
 * NAPI/busy-poll and async MCC events, then starts the TX queues and
 * notifies RoCE.  On failure, be_close() unwinds whatever was set up.
 *
 * Returns 0 on success or -EIO on any bring-up failure.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm completion queues so they start raising events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Report initial link state; errors here are non-fatal */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Re-learn VxLAN ports that were added while we were down */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3529
/* Arm or disarm Wake-on-LAN (magic packet).
 *
 * @enable: true to arm WoL (sets the PCI PM control bit and passes a
 *	    zeroed MAC to FW), false to disarm (passes the current
 *	    netdev MAC).
 * NOTE(review): the FW-side meaning of zero vs. real MAC in
 * be_cmd_enable_magic_wol() is inferred from usage here — confirm
 * against the FW command spec.
 *
 * Returns 0 on success or a negative errno.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem cmd;
	u8 mac[ETH_ALEN];
	int status;

	eth_zero_addr(mac);

	/* DMA buffer for the FW magic-WoL config command */
	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(dev, "Could not enable Wake-on-lan\n");
			goto err;
		}
	} else {
		ether_addr_copy(mac, adapter->netdev->dev_addr);
	}

	status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
	pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
	pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
err:
	dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3563
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003564static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3565{
3566 u32 addr;
3567
3568 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3569
3570 mac[5] = (u8)(addr & 0xFF);
3571 mac[4] = (u8)((addr >> 8) & 0xFF);
3572 mac[3] = (u8)((addr >> 16) & 0xFF);
3573 /* Use the OUI from the current MAC address */
3574 memcpy(mac, adapter->netdev->dev_addr, 3);
3575}
3576
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx uses pmac_add; newer chips program the MAC directly */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		/* A per-VF failure is logged but does not stop the loop;
		 * the status of the last attempt is what gets returned.
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
3612
Sathya Perla4c876612013-02-03 20:30:11 +00003613static int be_vfs_mac_query(struct be_adapter *adapter)
3614{
3615 int status, vf;
3616 u8 mac[ETH_ALEN];
3617 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003618
3619 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303620 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3621 mac, vf_cfg->if_handle,
3622 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003623 if (status)
3624 return status;
3625 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3626 }
3627 return 0;
3628}
3629
/* Tear down SR-IOV state: disable SR-IOV in PCI, remove each VF's MAC
 * and destroy its interface, then free the per-VF config array.
 *
 * If any VF is still assigned to a VM, SR-IOV is left enabled (only the
 * driver-side bookkeeping is freed) to avoid yanking devices out from
 * under guests.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx removes the pmac entry; newer chips clear the MAC */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3658
/* Destroy all queue objects in dependency order: MCC first, then RX CQs,
 * TX queues, and finally the event queues that service them.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3666
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303667static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003668{
Sathya Perla191eb752012-02-23 18:50:13 +00003669 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3670 cancel_delayed_work_sync(&adapter->work);
3671 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3672 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303673}
3674
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003675static void be_cancel_err_detection(struct be_adapter *adapter)
3676{
3677 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3678 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3679 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3680 }
3681}
3682
Somnath Koturb05004a2013-12-05 12:08:16 +05303683static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303684{
Somnath Koturb05004a2013-12-05 12:08:16 +05303685 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003686 be_cmd_pmac_del(adapter, adapter->if_handle,
3687 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303688 kfree(adapter->pmac_id);
3689 adapter->pmac_id = NULL;
3690 }
3691}
3692
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload setup: revert the interface to normal (non-tunnel)
 * mode in FW, clear the programmed VxLAN port, and strip the tunnel
 * offload feature bits from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	/* Remove the tunnel offload capabilities advertised to the stack */
	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303713
/* Compute the number of queue pairs each VF should get when num_vfs VFs
 * are enabled, based on the pooled resources.  Returns at least 1.
 *
 * NOTE(review): when num_vfs < be_max_vfs - 8, the result is
 * (max_rss_qs - 8) / num_vfs, which assumes max_rss_qs > 8 and can
 * yield 0 for large num_vfs — confirm FW guarantees on max_rss_qs.
 */
static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
{
	struct be_resources res = adapter->pool_res;
	u16 num_vf_qs = 1;

	/* Distribute the queue resources equally among the PF and it's VFs
	 * Do not distribute queue resources in multi-channel configuration.
	 */
	if (num_vfs && !be_is_mc(adapter)) {
		/* If number of VFs requested is 8 less than max supported,
		 * assign 8 queue pairs to the PF and divide the remaining
		 * resources evenly among the VFs
		 */
		if (num_vfs < (be_max_vfs(adapter) - 8))
			num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
		else
			num_vf_qs = res.max_rss_qs / num_vfs;

		/* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
		 * interfaces per port. Provide RSS on VFs, only if number
		 * of VFs requested is less than MAX_RSS_IFACES limit.
		 */
		if (num_vfs >= MAX_RSS_IFACES)
			num_vf_qs = 1;
	}
	return num_vf_qs;
}
3741
/* Full teardown of the adapter's FW/driver state; inverse of be_setup().
 *
 * Order: stop the worker, clear VFs, (Skyhawk PF only) rebalance SR-IOV
 * resources in FW for the max VF count, drop VxLAN offloads, remove the
 * MAC list, destroy the interface and all queues, and disable MSI-X.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 num_vf_qs;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter,
					       pci_sriov_get_totalvfs(pdev));
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					num_vf_qs);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3778
Kalesh AP0700d812015-01-20 03:51:43 -05003779static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3780 u32 cap_flags, u32 vf)
3781{
3782 u32 en_flags;
Kalesh AP0700d812015-01-20 03:51:43 -05003783
3784 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3785 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003786 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
Kalesh AP0700d812015-01-20 03:51:43 -05003787
3788 en_flags &= cap_flags;
3789
Vasundhara Volam435452a2015-03-20 06:28:23 -04003790 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
Kalesh AP0700d812015-01-20 03:51:43 -05003791}
3792
/* Create a FW interface for every VF.  On non-BE3 chips the capability
 * flags come from the VF's FW profile when one exists; otherwise a
 * conservative default set is used.  Returns 0 or the first FW error.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3826
/* Allocate the per-VF config array and initialize each entry's FW
 * handles to -1 ("not yet created") sentinels.
 * Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3843
/* Bring up SR-IOV: set up per-VF FW interfaces, MACs, privileges, QoS,
 * spoof-check state and link config, then enable SR-IOV in PCI.
 *
 * If VFs already exist (old_vfs != 0, e.g. after a PF reload with VFs
 * still assigned), the existing FW state is queried instead of being
 * recreated, and PCI SR-IOV enablement is skipped.
 *
 * Returns 0 on success; on failure, cleans up via be_vf_clear() and
 * returns the error.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist: re-learn their if_ids and MACs */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the FW's current spoof-check setting for this VF */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3927
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303928/* Converting function_mode bits on BE3 to SH mc_type enums */
3929
3930static u8 be_convert_mc_type(u32 function_mode)
3931{
Suresh Reddy66064db2014-06-23 16:41:29 +05303932 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303933 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303934 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303935 return FLEX10;
3936 else if (function_mode & VNIC_MODE)
3937 return vNIC2;
3938 else if (function_mode & UMC_ENABLED)
3939 return UMC;
3940 else
3941 return MC_NONE;
3942}
3943
/* On BE2/BE3 FW does not suggest the supported limits */
/* Derive the resource limits (MACs, VLANs, TX/RX/RSS/EQ counts and
 * interface capability flags) for BE2/BE3 chips from chip type,
 * multi-channel mode, SR-IOV usage and function capabilities, since
 * the FW on these chips does not report them.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res,
					  RESOURCE_LIMITS, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS only on an RSS-capable, non-SR-IOV physical function */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 accounts for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
4014
Sathya Perla30128032011-11-10 19:17:57 +00004015static void be_setup_init(struct be_adapter *adapter)
4016{
4017 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004018 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004019 adapter->if_handle = -1;
4020 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004021 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004022 if (be_physfn(adapter))
4023 adapter->cmd_privileges = MAX_PRIVILEGES;
4024 else
4025 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004026}
4027
Vasundhara Volambec84e62014-06-30 13:01:32 +05304028static int be_get_sriov_config(struct be_adapter *adapter)
4029{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304030 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05304031 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304032
Vasundhara Volamf2858732015-03-04 00:44:33 -05004033 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
Sathya Perlad3d18312014-08-01 17:47:30 +05304034
Vasundhara Volamace40af2015-03-04 00:44:34 -05004035 /* Some old versions of BE3 FW don't report max_vfs value */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304036 if (BE3_chip(adapter) && !res.max_vfs) {
4037 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4038 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4039 }
4040
Sathya Perlad3d18312014-08-01 17:47:30 +05304041 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304042
Vasundhara Volamace40af2015-03-04 00:44:34 -05004043 /* If during previous unload of the driver, the VFs were not disabled,
4044 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4045 * Instead use the TotalVFs value stored in the pci-dev struct.
4046 */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304047 old_vfs = pci_num_vf(adapter->pdev);
4048 if (old_vfs) {
Vasundhara Volamace40af2015-03-04 00:44:34 -05004049 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4050 old_vfs);
4051
4052 adapter->pool_res.max_vfs =
4053 pci_sriov_get_totalvfs(adapter->pdev);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304054 adapter->num_vfs = old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304055 }
4056
4057 return 0;
4058}
4059
/* Carve out the PF-pool SRIOV resources at probe time.
 * Reads the pool limits via be_get_sriov_config() and, on Skyhawk,
 * asks FW to (re)distribute queue resources between PF and future VFs.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	u16 num_vf_qs;
	int status;

	be_get_sriov_config(adapter);

	/* Advertise the true TotalVFs limit via sysfs only on a fresh load
	 * (with old VFs still enabled the value must not be changed)
	 */
	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		num_vf_qs = be_calculate_vf_qs(adapter, 0);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 num_vf_qs);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
4085
/* Discover this function's queue/MAC/vlan resource limits and cache them
 * in adapter->res. For BEx chips the limits are derived by the driver;
 * for Lancer/Skyhawk they are queried from FW.
 * Returns 0 on success or a negative error code.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
4136
/* Read controller attributes, FW config, WOL capability, port name and
 * resource limits from FW, then allocate the pmac_id table sized by the
 * UC-MAC limit. Returns 0 on success or a negative error code.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* On BEx, mirror the FW log level into the driver's msg_enable */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	/* The active profile query is a PF-only cmd; failure is benign */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
4178
Sathya Perla95046b92013-07-23 15:25:02 +05304179static int be_mac_setup(struct be_adapter *adapter)
4180{
4181 u8 mac[ETH_ALEN];
4182 int status;
4183
4184 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4185 status = be_cmd_get_perm_mac(adapter, mac);
4186 if (status)
4187 return status;
4188
4189 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4190 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4191 } else {
4192 /* Maybe the HW was reset; dev_addr must be re-programmed */
4193 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
4194 }
4195
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06004196 /* For BE3-R VFs, the PF programs the initial MAC address */
4197 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
4198 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
4199 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05304200 return 0;
4201}
4202
/* Arm the periodic (1 sec) worker task and record that it is scheduled
 * so a later cancel knows there is something to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4208
/* Arm the (1 sec) error-detection task and record that it is scheduled
 * so a later cancel knows there is something to cancel.
 */
static void be_schedule_err_detection(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4215
Sathya Perla77071332013-08-27 16:57:34 +05304216static int be_setup_queues(struct be_adapter *adapter)
4217{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304218 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05304219 int status;
4220
4221 status = be_evt_queues_create(adapter);
4222 if (status)
4223 goto err;
4224
4225 status = be_tx_qs_create(adapter);
4226 if (status)
4227 goto err;
4228
4229 status = be_rx_cqs_create(adapter);
4230 if (status)
4231 goto err;
4232
4233 status = be_mcc_queues_create(adapter);
4234 if (status)
4235 goto err;
4236
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304237 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4238 if (status)
4239 goto err;
4240
4241 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4242 if (status)
4243 goto err;
4244
Sathya Perla77071332013-08-27 16:57:34 +05304245 return 0;
4246err:
4247 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4248 return status;
4249}
4250
/* Tear down and re-create all queues so that updated queue counts take
 * effect. The netdev is closed for the duration and re-opened at the end
 * if it was running. Returns 0 on success or the first error hit.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	/* Re-enable MSIx only if it was torn down above */
	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4286
/* Parse the major version number out of a FW version string such as
 * "4.2.314.12". Returns 0 when the string does not begin with an
 * integer.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4297
Sathya Perlaf962f842015-02-23 04:20:16 -05004298/* If any VFs are already enabled don't FLR the PF */
4299static bool be_reset_required(struct be_adapter *adapter)
4300{
4301 return pci_num_vf(adapter->pdev) ? false : true;
4302}
4303
/* Wait for the FW to be ready and perform the required initialization
 * (optional function-level reset, FW init cmd, interrupt enable).
 * Returns 0 on success or a negative error code.
 */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	/* Block until FW reports itself ready to accept cmds */
	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FLR the function unless VFs are already enabled */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_error(adapter, BE_CLEAR_ALL);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4335
/* Bring the function fully up: FW init, resource discovery, MSIx,
 * interface creation, queue creation, MAC/vlan/flow-control programming
 * and (optionally) VF setup. On any error everything created so far is
 * torn down via be_clear(). Returns 0 on success.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* Carve out SRIOV pool resources (PF-only; not applicable to BE2) */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* BE2 needs FW >= 4.0 for reliable interrupt delivery */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program vlan filters that were configured before this setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* If the requested flow-control setting is rejected, re-read what
	 * the FW actually applied
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4420
Ivan Vecera66268732011-12-08 01:31:21 +00004421#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: notify each event queue and schedule its NAPI
 * poll so completions are processed without relying on interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
4433#endif
4434
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304435static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004436
Sathya Perla306f1342011-08-02 19:57:45 +00004437static bool phy_flashing_required(struct be_adapter *adapter)
4438{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05004439 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004440 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00004441}
4442
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004443static bool is_comp_in_ufi(struct be_adapter *adapter,
4444 struct flash_section_info *fsec, int type)
4445{
4446 int i = 0, img_type = 0;
4447 struct flash_section_info_g2 *fsec_g2 = NULL;
4448
Sathya Perlaca34fe32012-11-06 17:48:56 +00004449 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004450 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4451
4452 for (i = 0; i < MAX_FLASH_COMP; i++) {
4453 if (fsec_g2)
4454 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4455 else
4456 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4457
4458 if (img_type == type)
4459 return true;
4460 }
4461 return false;
4462
4463}
4464
Jingoo Han4188e7d2013-08-05 18:02:02 +09004465static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304466 int header_size,
4467 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004468{
4469 struct flash_section_info *fsec = NULL;
4470 const u8 *p = fw->data;
4471
4472 p += header_size;
4473 while (p < (fw->data + fw->size)) {
4474 fsec = (struct flash_section_info *)p;
4475 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4476 return fsec;
4477 p += 32;
4478 }
4479 return NULL;
4480}
4481
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304482static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4483 u32 img_offset, u32 img_size, int hdr_size,
4484 u16 img_optype, bool *crc_match)
4485{
4486 u32 crc_offset;
4487 int status;
4488 u8 crc[4];
4489
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004490 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4491 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304492 if (status)
4493 return status;
4494
4495 crc_offset = hdr_size + img_offset + img_size - 4;
4496
4497 /* Skip flashing, if crc of flashed region matches */
4498 if (!memcmp(crc, p + crc_offset, 4))
4499 *crc_match = true;
4500 else
4501 *crc_match = false;
4502
4503 return status;
4504}
4505
/* Write one image to the flash in 32KB chunks via FW WRITE_FLASHROM
 * cmds. All chunks but the last use a SAVE op; the final chunk uses a
 * FLASH op. For PHY FW, an ILLEGAL_REQUEST completion is ignored and 0
 * is returned (presumably when the card has no such PHY — the FW rejects
 * the op). Returns 0 on success or the first cmd error otherwise.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		/* chunk size is capped at 32KB by the cmd buffer */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* last chunk uses the FLASH op; earlier ones just SAVE */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
4546
/* Flash a UFI image on BE2, BE3 and BE3-R cards.
 * Walks the static per-chip component tables, skips components that are
 * absent from the UFI or not required (NCSI on too-old FW, PHY FW when
 * not needed, REDBOOT whose CRC already matches) and writes the rest via
 * be_flash(). Returns 0 on success, -1 on a malformed image, or the
 * first flashing error.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	/* flash offset/max-size/optype table for gen3 (BE3) layouts */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* flash offset/max-size/optype table for gen2 (BE2) layouts */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		/* gen2 UFIs carry no per-image headers */
		img_hdrs_size = 0;
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW needs the card FW to be at least 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Skip REDBOOT if the flashed copy already matches */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		/* reject images whose component runs past the file end */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
4664
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304665static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4666{
4667 u32 img_type = le32_to_cpu(fsec_entry.type);
4668 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4669
4670 if (img_optype != 0xFFFF)
4671 return img_optype;
4672
4673 switch (img_type) {
4674 case IMAGE_FIRMWARE_iSCSI:
4675 img_optype = OPTYPE_ISCSI_ACTIVE;
4676 break;
4677 case IMAGE_BOOT_CODE:
4678 img_optype = OPTYPE_REDBOOT;
4679 break;
4680 case IMAGE_OPTION_ROM_ISCSI:
4681 img_optype = OPTYPE_BIOS;
4682 break;
4683 case IMAGE_OPTION_ROM_PXE:
4684 img_optype = OPTYPE_PXE_BIOS;
4685 break;
4686 case IMAGE_OPTION_ROM_FCoE:
4687 img_optype = OPTYPE_FCOE_BIOS;
4688 break;
4689 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4690 img_optype = OPTYPE_ISCSI_BACKUP;
4691 break;
4692 case IMAGE_NCSI:
4693 img_optype = OPTYPE_NCSI_FW;
4694 break;
4695 case IMAGE_FLASHISM_JUMPVECTOR:
4696 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4697 break;
4698 case IMAGE_FIRMWARE_PHY:
4699 img_optype = OPTYPE_SH_PHY_FW;
4700 break;
4701 case IMAGE_REDBOOT_DIR:
4702 img_optype = OPTYPE_REDBOOT_DIR;
4703 break;
4704 case IMAGE_REDBOOT_CONFIG:
4705 img_optype = OPTYPE_REDBOOT_CONFIG;
4706 break;
4707 case IMAGE_UFI_DIR:
4708 img_optype = OPTYPE_UFI_DIR;
4709 break;
4710 default:
4711 break;
4712 }
4713
4714 return img_optype;
4715}
4716
/* Flash all firmware images of a Skyhawk UFI file.
 *
 * Walks the flash-section directory found in the UFI, and for each entry
 * whose optype is recognized, verifies the on-card CRC and (re)flashes the
 * region when it differs.  Newer FW supports flashing by absolute offset
 * (OPTYPE_OFFSET_SPECIFIED); if the running FW rejects that, the whole loop
 * is retried once using the legacy per-optype mechanism.
 *
 * @adapter:       adapter being flashed
 * @fw:            UFI firmware blob from request_firmware()
 * @flash_cmd:     pre-allocated DMA buffer for the write_flashrom command
 * @num_of_images: number of image_hdr entries preceding the flash data
 *
 * Returns 0 on success, -EAGAIN if a reboot is needed to finish the update,
 * a negative errno on other failures.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		/* optype 0xFFFF in the section entry marks an old-format
		 * FW image; such entries get relaxed error handling below
		 */
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* be_get_img_optype() returns 0xFFFF for unknown img_type */
		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		/* Region on card already matches the file; skip it */
		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		/* Bounds check: the section data must lie inside the blob */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
				  img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4827
/* Download a firmware image to a Lancer adapter.
 *
 * The image is streamed to the FW object "/prg" in 32KB chunks through a
 * single reusable DMA buffer, then committed with a zero-length write.
 * Depending on the FW's reported change_status, the adapter is either
 * reset in-place to activate the new image or the user is told to reboot.
 *
 * @adapter: adapter to flash
 * @fw:      firmware blob; size must be a multiple of 4 bytes
 *
 * Returns 0 on success (even if activation still requires a reboot),
 * negative errno otherwise.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One buffer holds the write_object request header plus one chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
					   &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW may accept fewer bytes than requested; advance by the
		 * amount actually written
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		/* FW can be activated by resetting the physical device */
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4912
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004913/* Check if the flash image file is compatible with the adapter that
4914 * is being flashed.
4915 */
4916static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4917 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004918{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004919 if (!fhdr) {
4920 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
Vasundhara Volam887a65c2015-07-10 05:32:46 -04004921 return false;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004922 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004923
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004924 /* First letter of the build version is used to identify
4925 * which chip this image file is meant for.
4926 */
4927 switch (fhdr->build[0]) {
4928 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004929 if (!skyhawk_chip(adapter))
4930 return false;
4931 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004932 case BLD_STR_UFI_TYPE_BE3:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004933 if (!BE3_chip(adapter))
4934 return false;
4935 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004936 case BLD_STR_UFI_TYPE_BE2:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004937 if (!BE2_chip(adapter))
4938 return false;
4939 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004940 default:
4941 return false;
4942 }
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004943
4944 return (fhdr->asic_type_rev >= adapter->asic_rev);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004945}
4946
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004947static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4948{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004949 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004950 struct flash_file_hdr_g3 *fhdr3;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004951 struct image_hdr *img_hdr_ptr;
4952 int status = 0, i, num_imgs;
Ajit Khaparde84517482009-09-04 03:12:16 +00004953 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004954
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004955 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4956 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4957 dev_err(dev, "Flash image is not compatible with adapter\n");
4958 return -EINVAL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004959 }
4960
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004961 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304962 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4963 GFP_KERNEL);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004964 if (!flash_cmd.va)
4965 return -ENOMEM;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004966
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004967 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4968 for (i = 0; i < num_imgs; i++) {
4969 img_hdr_ptr = (struct image_hdr *)(fw->data +
4970 (sizeof(struct flash_file_hdr_g3) +
4971 i * sizeof(struct image_hdr)));
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004972 if (!BE2_chip(adapter) &&
4973 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4974 continue;
4975
4976 if (skyhawk_chip(adapter))
4977 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4978 num_imgs);
4979 else
4980 status = be_flash_BEx(adapter, fw, &flash_cmd,
4981 num_imgs);
Ajit Khaparde84517482009-09-04 03:12:16 +00004982 }
4983
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004984 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4985 if (!status)
4986 dev_info(dev, "Firmware flashed successfully\n");
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004987
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004988 return status;
4989}
4990
4991int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4992{
4993 const struct firmware *fw;
4994 int status;
4995
4996 if (!netif_running(adapter->netdev)) {
4997 dev_err(&adapter->pdev->dev,
4998 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304999 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00005000 }
5001
5002 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
5003 if (status)
5004 goto fw_exit;
5005
5006 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
5007
5008 if (lancer_chip(adapter))
5009 status = lancer_fw_download(adapter, fw);
5010 else
5011 status = be_fw_download(adapter, fw);
5012
Somnath Kotureeb65ce2013-05-26 21:08:36 +00005013 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05305014 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00005015
Ajit Khaparde84517482009-09-04 03:12:16 +00005016fw_exit:
5017 release_firmware(fw);
5018 return status;
5019}
5020
/* ndo_bridge_setlink handler: program the e-switch forwarding mode.
 *
 * Parses the IFLA_AF_SPEC netlink attribute for an IFLA_BRIDGE_MODE and
 * configures the HW switch as VEPA or VEB accordingly.  Only supported
 * when SR-IOV is enabled.
 *
 * Returns 0 on success, -EOPNOTSUPP without SR-IOV, -EINVAL on malformed
 * netlink input, or the FW command status on failure.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* Only the first IFLA_BRIDGE_MODE attribute is acted upon */
		return status;
	}
	/* NOTE(review): if no IFLA_BRIDGE_MODE attr is present, the loop
	 * falls through to err: and logs a failure (with mode still 0,
	 * printed as "VEB") while returning status 0 — confirm intended.
	 */
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
5067
5068static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02005069 struct net_device *dev, u32 filter_mask,
5070 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005071{
5072 struct be_adapter *adapter = netdev_priv(dev);
5073 int status = 0;
5074 u8 hsw_mode;
5075
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005076 /* BE and Lancer chips support VEB mode only */
5077 if (BEx_chip(adapter) || lancer_chip(adapter)) {
5078 hsw_mode = PORT_FWD_TYPE_VEB;
5079 } else {
5080 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04005081 adapter->if_handle, &hsw_mode,
5082 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005083 if (status)
5084 return 0;
Kalesh Purayilff9ed192015-07-10 05:32:44 -04005085
5086 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
5087 return 0;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005088 }
5089
5090 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
5091 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01005092 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Scott Feldman7d4f8d82015-06-22 00:27:17 -07005093 0, 0, nlflags, filter_mask, NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005094}
5095
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305096#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005097/* VxLAN offload Notes:
5098 *
5099 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5100 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5101 * is expected to work across all types of IP tunnels once exported. Skyhawk
5102 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305103 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5104 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5105 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005106 *
5107 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
5108 * adds more than one port, disable offloads and don't re-enable them again
5109 * until after all the tunnels are removed.
5110 */
/* ndo_add_vxlan_port handler: enable VxLAN offloads for the given port.
 *
 * Skyhawk supports offloads for a single UDP dport only; adding a second
 * port disables offloads entirely until all ports are removed (see the
 * "VxLAN offload Notes" block above).  vxlan_port_count is incremented on
 * every add so that be_del_vxlan_port()'s decrements stay balanced.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* VxLAN offloads are implemented only for Skyhawk-R */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		/* count this port too, so deletes balance the adds */
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Offloads were already disabled by an earlier multi-port add;
	 * just track the additional port
	 */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload features now that VxLAN is configured */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
5159
5160static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5161 __be16 port)
5162{
5163 struct be_adapter *adapter = netdev_priv(netdev);
5164
5165 if (lancer_chip(adapter) || BEx_chip(adapter))
5166 return;
5167
5168 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005169 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305170
5171 be_disable_vxlan_offloads(adapter);
5172
5173 dev_info(&adapter->pdev->dev,
5174 "Disabled VxLAN offloads for UDP port %d\n",
5175 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005176done:
5177 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05305178}
Joe Stringer725d5482014-11-13 16:38:13 -08005179
Jesse Gross5f352272014-12-23 22:37:26 -08005180static netdev_features_t be_features_check(struct sk_buff *skb,
5181 struct net_device *dev,
5182 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08005183{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305184 struct be_adapter *adapter = netdev_priv(dev);
5185 u8 l4_hdr = 0;
5186
5187 /* The code below restricts offload features for some tunneled packets.
5188 * Offload features for normal (non tunnel) packets are unchanged.
5189 */
5190 if (!skb->encapsulation ||
5191 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5192 return features;
5193
5194 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5195 * should disable tunnel offload features if it's not a VxLAN packet,
5196 * as tunnel offloads have been enabled only for VxLAN. This is done to
5197 * allow other tunneled traffic like GRE work fine while VxLAN
5198 * offloads are configured in Skyhawk-R.
5199 */
5200 switch (vlan_get_protocol(skb)) {
5201 case htons(ETH_P_IP):
5202 l4_hdr = ip_hdr(skb)->protocol;
5203 break;
5204 case htons(ETH_P_IPV6):
5205 l4_hdr = ipv6_hdr(skb)->nexthdr;
5206 break;
5207 default:
5208 return features;
5209 }
5210
5211 if (l4_hdr != IPPROTO_UDP ||
5212 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5213 skb->inner_protocol != htons(ETH_P_TEB) ||
5214 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5215 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
5216 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
5217
5218 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08005219}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305220#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05305221
/* Net device operations for the be2net driver */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	/* SR-IOV VF management ops */
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
	.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	/* e-switch (VEB/VEPA) bridge configuration */
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	/* VxLAN tunnel offload hooks (Skyhawk-R only at runtime) */
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
#endif
};
5253
/* One-time net_device initialization: advertise offload features, set
 * device flags, and install the netdev/ethtool op tables.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-togglable HW offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything above is enabled by default; VLAN RX offloads are
	 * always-on (not listed in hw_features, so not user-togglable)
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast addresses; no need for promiscuous mode */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5280
Kalesh AP87ac1a52015-02-23 04:20:15 -05005281static void be_cleanup(struct be_adapter *adapter)
5282{
5283 struct net_device *netdev = adapter->netdev;
5284
5285 rtnl_lock();
5286 netif_device_detach(netdev);
5287 if (netif_running(netdev))
5288 be_close(netdev);
5289 rtnl_unlock();
5290
5291 be_clear(adapter);
5292}
5293
Kalesh AP484d76f2015-02-23 04:20:14 -05005294static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005295{
Kalesh APd0e1b312015-02-23 04:20:12 -05005296 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005297 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005298
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005299 status = be_setup(adapter);
5300 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005301 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005302
Kalesh APd0e1b312015-02-23 04:20:12 -05005303 if (netif_running(netdev)) {
5304 status = be_open(netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005305 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005306 return status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005307 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005308
Kalesh APd0e1b312015-02-23 04:20:12 -05005309 netif_device_attach(netdev);
5310
Kalesh AP484d76f2015-02-23 04:20:14 -05005311 return 0;
5312}
5313
5314static int be_err_recover(struct be_adapter *adapter)
5315{
5316 struct device *dev = &adapter->pdev->dev;
5317 int status;
5318
5319 status = be_resume(adapter);
5320 if (status)
5321 goto err;
5322
Sathya Perla9fa465c2015-02-23 04:20:13 -05005323 dev_info(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005324 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005325err:
Sathya Perla9fa465c2015-02-23 04:20:13 -05005326 if (be_physfn(adapter))
Somnath Kotur4bebb562013-12-05 12:07:55 +05305327 dev_err(dev, "Adapter recovery failed\n");
Sathya Perla9fa465c2015-02-23 04:20:13 -05005328 else
5329 dev_err(dev, "Re-trying adapter recovery\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005330
5331 return status;
5332}
5333
/* Delayed-work handler that polls for HW errors and drives recovery.
 *
 * On a detected HW error the adapter is quiesced; recovery itself is
 * attempted only on Lancer chips.  The work re-arms itself unless a PF
 * recovery attempt failed (VFs always keep retrying).
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,
			     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (be_check_error(adapter, BE_ERROR_HW)) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}
5355
Vasundhara Volam21252372015-02-06 08:18:42 -05005356static void be_log_sfp_info(struct be_adapter *adapter)
5357{
5358 int status;
5359
5360 status = be_cmd_query_sfp_info(adapter);
5361 if (!status) {
5362 dev_err(&adapter->pdev->dev,
5363 "Unqualified SFP+ detected on %c from %s part no: %s",
5364 adapter->port_name, adapter->phy.vendor_name,
5365 adapter->phy.vendor_pn);
5366 }
5367 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5368}
5369
/* Periodic (1s) housekeeping work: reap MCC completions, refresh stats
 * and die temperature, replenish starved RX queues, update EQ delay and
 * log SFP events when flagged.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only issue a new stats request when the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Temperature is polled every be_get_temp_freq ticks, PF only */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5418
Sathya Perla78fad34e2015-02-23 04:20:08 -05005419static void be_unmap_pci_bars(struct be_adapter *adapter)
5420{
5421 if (adapter->csr)
5422 pci_iounmap(adapter->pdev, adapter->csr);
5423 if (adapter->db)
5424 pci_iounmap(adapter->pdev, adapter->db);
5425}
5426
/* Doorbell BAR number: BAR 0 on Lancer chips and on VFs, BAR 4 otherwise */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5434
5435static int be_roce_map_pci_bars(struct be_adapter *adapter)
5436{
5437 if (skyhawk_chip(adapter)) {
5438 adapter->roce_db.size = 4096;
5439 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5440 db_bar(adapter));
5441 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5442 db_bar(adapter));
5443 }
5444 return 0;
5445}
5446
/* ioremap the PCI BARs used by the driver: CSR (BEx PF only), the
 * doorbell BAR, and PCICFG (PF on BEx/Skyhawk). Also latches the SLI
 * family and VF/PF identity from PCI config space.
 * Returns 0 on success, -ENOMEM if any mapping fails (all prior mappings
 * are undone via be_unmap_pci_bars()).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* SLI_INTF register tells us the chip family and whether this
	 * function is a VF — needed before deciding which BARs to map.
	 */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR space exists only on BE2/BE3 and only for the PF */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			/* VFs access PCICFG through a window in the
			 * doorbell BAR — no separate mapping needed.
			 */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5489
5490static void be_drv_cleanup(struct be_adapter *adapter)
5491{
5492 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5493 struct device *dev = &adapter->pdev->dev;
5494
5495 if (mem->va)
5496 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5497
5498 mem = &adapter->rx_filter;
5499 if (mem->va)
5500 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5501
5502 mem = &adapter->stats_cmd;
5503 if (mem->va)
5504 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5505}
5506
/* Allocate and initialize various fields in be_adapter struct:
 * DMA buffers for the mailbox, RX-filter and stats FW commands, the
 * command locks, and the worker/error-detection work items.
 * Returns 0 on success or -ENOMEM; on failure all buffers allocated so
 * far are freed (goto-unwind below).
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the mailbox used by FW commands can
	 * be aligned to a 16-byte boundary below (mbox_mem_align).
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is ever freed.
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats command size depends on the chip generation's request
	 * format version.
	 */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	/* Flow control defaults to enabled in both directions */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5577
/* PCI remove callback: tear down the adapter in strict reverse order of
 * be_probe(). Ordering matters — RoCE and interrupts first, then the
 * netdev, then FW/BAR/driver state, and the netdev memory last (it also
 * holds the adapter struct).
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the error-detection worker before pulling state from
	 * under it.
	 */
	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* Frees the adapter too (it lives in netdev_priv) */
	free_netdev(adapter->netdev);
}
5607
Arnd Bergmann9a032592015-05-18 23:06:45 +02005608static ssize_t be_hwmon_show_temp(struct device *dev,
5609 struct device_attribute *dev_attr,
5610 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305611{
5612 struct be_adapter *adapter = dev_get_drvdata(dev);
5613
5614 /* Unit: millidegree Celsius */
5615 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5616 return -EIO;
5617 else
5618 return sprintf(buf, "%u\n",
5619 adapter->hwmon_info.be_on_die_temp * 1000);
5620}
5621
/* Expose a read-only temp1_input attribute through the hwmon sysfs
 * interface, backed by be_hwmon_show_temp().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

/* Generates be_hwmon_groups, passed at hwmon device registration */
ATTRIBUTE_GROUPS(be_hwmon);
5631
Sathya Perlad3791422012-09-28 04:39:44 +00005632static char *mc_name(struct be_adapter *adapter)
5633{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305634 char *str = ""; /* default */
5635
5636 switch (adapter->mc_type) {
5637 case UMC:
5638 str = "UMC";
5639 break;
5640 case FLEX10:
5641 str = "FLEX10";
5642 break;
5643 case vNIC1:
5644 str = "vNIC-1";
5645 break;
5646 case nPAR:
5647 str = "nPAR";
5648 break;
5649 case UFP:
5650 str = "UFP";
5651 break;
5652 case vNIC2:
5653 str = "vNIC-2";
5654 break;
5655 default:
5656 str = "";
5657 }
5658
5659 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005660}
5661
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5666
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005667static inline char *nic_name(struct pci_dev *pdev)
5668{
5669 switch (pdev->device) {
5670 case OC_DEVICE_ID1:
5671 return OC_NAME;
5672 case OC_DEVICE_ID2:
5673 return OC_NAME_BE;
5674 case OC_DEVICE_ID3:
5675 case OC_DEVICE_ID4:
5676 return OC_NAME_LANCER;
5677 case BE_DEVICE_ID2:
5678 return BE3_NAME;
5679 case OC_DEVICE_ID5:
5680 case OC_DEVICE_ID6:
5681 return OC_NAME_SH;
5682 default:
5683 return BE_NAME;
5684 }
5685}
5686
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00005687static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005688{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005689 struct be_adapter *adapter;
5690 struct net_device *netdev;
Vasundhara Volam21252372015-02-06 08:18:42 -05005691 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005692
Sathya Perlaacbafeb2014-09-02 09:56:46 +05305693 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5694
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005695 status = pci_enable_device(pdev);
5696 if (status)
5697 goto do_none;
5698
5699 status = pci_request_regions(pdev, DRV_NAME);
5700 if (status)
5701 goto disable_dev;
5702 pci_set_master(pdev);
5703
Sathya Perla7f640062012-06-05 19:37:20 +00005704 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Kalesh APddf11692014-07-17 16:20:28 +05305705 if (!netdev) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005706 status = -ENOMEM;
5707 goto rel_reg;
5708 }
5709 adapter = netdev_priv(netdev);
5710 adapter->pdev = pdev;
5711 pci_set_drvdata(pdev, adapter);
5712 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005713 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005714
Russell King4c15c242013-06-26 23:49:11 +01005715 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005716 if (!status) {
5717 netdev->features |= NETIF_F_HIGHDMA;
5718 } else {
Russell King4c15c242013-06-26 23:49:11 +01005719 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005720 if (status) {
5721 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5722 goto free_netdev;
5723 }
5724 }
5725
Kalesh AP2f951a92014-09-12 17:39:21 +05305726 status = pci_enable_pcie_error_reporting(pdev);
5727 if (!status)
5728 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
Sathya Perlad6b6d982012-09-05 01:56:48 +00005729
Sathya Perla78fad34e2015-02-23 04:20:08 -05005730 status = be_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005731 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00005732 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005733
Sathya Perla78fad34e2015-02-23 04:20:08 -05005734 status = be_drv_init(adapter);
5735 if (status)
5736 goto unmap_bars;
5737
Sathya Perla5fb379e2009-06-18 00:02:59 +00005738 status = be_setup(adapter);
5739 if (status)
Sathya Perla78fad34e2015-02-23 04:20:08 -05005740 goto drv_cleanup;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005741
Sathya Perla3abcded2010-10-03 22:12:27 -07005742 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005743 status = register_netdev(netdev);
5744 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00005745 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005746
Parav Pandit045508a2012-03-26 14:27:13 +00005747 be_roce_dev_add(adapter);
5748
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005749 be_schedule_err_detection(adapter);
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00005750
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305751 /* On Die temperature not supported for VF. */
Arnd Bergmann9a032592015-05-18 23:06:45 +02005752 if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305753 adapter->hwmon_info.hwmon_dev =
5754 devm_hwmon_device_register_with_groups(&pdev->dev,
5755 DRV_NAME,
5756 adapter,
5757 be_hwmon_groups);
5758 adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5759 }
5760
Sathya Perlad3791422012-09-28 04:39:44 +00005761 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
Vasundhara Volam21252372015-02-06 08:18:42 -05005762 func_name(adapter), mc_name(adapter), adapter->port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00005763
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005764 return 0;
5765
Sathya Perla5fb379e2009-06-18 00:02:59 +00005766unsetup:
5767 be_clear(adapter);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005768drv_cleanup:
5769 be_drv_cleanup(adapter);
5770unmap_bars:
5771 be_unmap_pci_bars(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00005772free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00005773 free_netdev(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005774rel_reg:
5775 pci_release_regions(pdev);
5776disable_dev:
5777 pci_disable_device(pdev);
5778do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07005779 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005780 return status;
5781}
5782
/* Legacy PM suspend callback: arm wake-on-LAN if the user enabled it,
 * quiesce the HW and workers, then power the PCI device down.
 * Reversed by be_pci_resume().
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	/* Stop background error detection before tearing down queues */
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5800
Kalesh AP484d76f2015-02-23 04:20:14 -05005801static int be_pci_resume(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005802{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005803 struct be_adapter *adapter = pci_get_drvdata(pdev);
Kalesh AP484d76f2015-02-23 04:20:14 -05005804 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005805
5806 status = pci_enable_device(pdev);
5807 if (status)
5808 return status;
5809
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005810 pci_restore_state(pdev);
5811
Kalesh AP484d76f2015-02-23 04:20:14 -05005812 status = be_resume(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00005813 if (status)
5814 return status;
5815
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005816 be_schedule_err_detection(adapter);
5817
Suresh Reddy76a9e082014-01-15 13:23:40 +05305818 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005819 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005820
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005821 return 0;
5822}
5823
Sathya Perla82456b02010-02-17 01:35:37 +00005824/*
5825 * An FLR will stop BE from DMAing any data.
5826 */
5827static void be_shutdown(struct pci_dev *pdev)
5828{
5829 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005830
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005831 if (!adapter)
5832 return;
Sathya Perla82456b02010-02-17 01:35:37 +00005833
Devesh Sharmad114f992014-06-10 19:32:15 +05305834 be_roce_dev_shutdown(adapter);
Sathya Perla0f4a6822011-03-21 20:49:28 +00005835 cancel_delayed_work_sync(&adapter->work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005836 be_cancel_err_detection(adapter);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005837
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005838 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005839
Ajit Khaparde57841862011-04-06 18:08:43 +00005840 be_cmd_reset_function(adapter);
5841
Sathya Perla82456b02010-02-17 01:35:37 +00005842 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005843}
5844
Sathya Perlacf588472010-02-14 21:22:01 +00005845static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05305846 pci_channel_state_t state)
Sathya Perlacf588472010-02-14 21:22:01 +00005847{
5848 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00005849
5850 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5851
Venkata Duvvuru954f6822015-05-13 13:00:13 +05305852 if (!be_check_error(adapter, BE_ERROR_EEH)) {
5853 be_set_error(adapter, BE_ERROR_EEH);
Sathya Perlacf588472010-02-14 21:22:01 +00005854
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005855 be_cancel_err_detection(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005856
Kalesh AP87ac1a52015-02-23 04:20:15 -05005857 be_cleanup(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005858 }
Sathya Perlacf588472010-02-14 21:22:01 +00005859
5860 if (state == pci_channel_io_perm_failure)
5861 return PCI_ERS_RESULT_DISCONNECT;
5862
5863 pci_disable_device(pdev);
5864
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005865 /* The error could cause the FW to trigger a flash debug dump.
5866 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005867 * can cause it not to recover; wait for it to finish.
5868 * Wait only for first function as it is needed only once per
5869 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005870 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005871 if (pdev->devfn == 0)
5872 ssleep(30);
5873
Sathya Perlacf588472010-02-14 21:22:01 +00005874 return PCI_ERS_RESULT_NEED_RESET;
5875}
5876
/* EEH/AER slot_reset callback: re-enable the device after a slot reset,
 * wait for FW readiness, and clear the driver's error state. Returns
 * RECOVERED on success, DISCONNECT if the device cannot be revived.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
5902
/* EEH/AER resume callback: runs after be_eeh_reset() succeeded; bring
 * the adapter back to operational state and restart error detection.
 * Failures are only logged — the PCI error-recovery API gives us no way
 * to report them from this hook.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5921
Vasundhara Volamace40af2015-03-04 00:44:34 -05005922static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5923{
5924 struct be_adapter *adapter = pci_get_drvdata(pdev);
5925 u16 num_vf_qs;
5926 int status;
5927
5928 if (!num_vfs)
5929 be_vf_clear(adapter);
5930
5931 adapter->num_vfs = num_vfs;
5932
5933 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5934 dev_warn(&pdev->dev,
5935 "Cannot disable VFs while they are assigned\n");
5936 return -EBUSY;
5937 }
5938
5939 /* When the HW is in SRIOV capable configuration, the PF-pool resources
5940 * are equally distributed across the max-number of VFs. The user may
5941 * request only a subset of the max-vfs to be enabled.
5942 * Based on num_vfs, redistribute the resources across num_vfs so that
5943 * each VF will have access to more number of resources.
5944 * This facility is not available in BE3 FW.
5945 * Also, this is done by FW in Lancer chip.
5946 */
5947 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
5948 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
5949 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
5950 adapter->num_vfs, num_vf_qs);
5951 if (status)
5952 dev_err(&pdev->dev,
5953 "Failed to optimize SR-IOV resources\n");
5954 }
5955
5956 status = be_get_resources(adapter);
5957 if (status)
5958 return be_cmd_status(status);
5959
5960 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5961 rtnl_lock();
5962 status = be_update_queues(adapter);
5963 rtnl_unlock();
5964 if (status)
5965 return be_cmd_status(status);
5966
5967 if (adapter->num_vfs)
5968 status = be_vf_setup(adapter);
5969
5970 if (!status)
5971 return adapter->num_vfs;
5972
5973 return 0;
5974}
5975
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07005976static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00005977 .error_detected = be_eeh_err_detected,
5978 .slot_reset = be_eeh_reset,
5979 .resume = be_eeh_resume,
5980};
5981
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005982static struct pci_driver be_driver = {
5983 .name = DRV_NAME,
5984 .id_table = be_dev_ids,
5985 .probe = be_probe,
5986 .remove = be_remove,
5987 .suspend = be_suspend,
Kalesh AP484d76f2015-02-23 04:20:14 -05005988 .resume = be_pci_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00005989 .shutdown = be_shutdown,
Vasundhara Volamace40af2015-03-04 00:44:34 -05005990 .sriov_configure = be_pci_sriov_configure,
Sathya Perlacf588472010-02-14 21:22:01 +00005991 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005992};
5993
5994static int __init be_init_module(void)
5995{
Joe Perches8e95a202009-12-03 07:58:21 +00005996 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5997 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005998 printk(KERN_WARNING DRV_NAME
5999 " : Module param rx_frag_size must be 2048/4096/8192."
6000 " Using 2048\n");
6001 rx_frag_size = 2048;
6002 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006003
Vasundhara Volamace40af2015-03-04 00:44:34 -05006004 if (num_vfs > 0) {
6005 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6006 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6007 }
6008
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006009 return pci_register_driver(&be_driver);
6010}
6011module_init(be_init_module);
6012
6013static void __exit be_exit_module(void)
6014{
6015 pci_unregister_driver(&be_driver);
6016}
6017module_exit(be_exit_module);