blob: d86bc5d5224627a812ba0a430c21f7a4f23513b3 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamd19261b2015-05-06 05:30:39 -04002 * Copyright (C) 2005 - 2015 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);	/* read-only in sysfs */
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX fragment buffer posted to the hardware. */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
/* PCI vendor/device IDs claimed by this driver; zero entry terminates. */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for each bit of the UE (unrecoverable error)
 * status-low register; index == bit position.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
/* UE Status High CSR */
/* Human-readable names for each bit of the UE status-high register;
 * index == bit position, last entry is a catch-all.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530160 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530182 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530193
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530194 if (be_check_error(adapter, BE_ERROR_HW))
195 return;
196
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 val |= qid & DB_RQ_RING_ID_MASK;
198 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000199
200 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000201 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202}
203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
205 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206{
207 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530208
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530209 if (be_check_error(adapter, BE_ERROR_HW))
210 return;
211
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000212 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000214
215 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000216 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217}
218
/* Ring the event-queue doorbell for EQ @qid.
 * @arm:		re-arm the EQ to raise further interrupts
 * @clear_int:		clear the pending interrupt
 * @num_popped:		number of EQ entries consumed by the driver
 * @eq_delay_mult_enc:	encoded interrupt-delay multiplier (R2I delay)
 * No-op (after building the ring-id bits) when a HW error is flagged.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	/* mark this doorbell as an event (vs completion) notification */
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
240
Sathya Perla8788fdc2009-07-27 22:52:03 +0000241void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242{
243 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530244
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000246 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
247 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000248
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530249 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000250 return;
251
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252 if (arm)
253 val |= 1 << DB_CQ_REARM_SHIFT;
254 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000255 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256}
257
/* ndo_set_mac_address handler: program a new MAC address via firmware.
 * The new MAC is first added (PMAC_ADD), the old one deleted, and the
 * change is confirmed by reading back the active MAC from FW — a VF
 * without FILTMGMT privilege may have its request silently ignored.
 * Returns 0 on success or a negative errno on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
318
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319/* BE2 supports only v0 cmd */
320static void *hw_stats_from_cmd(struct be_adapter *adapter)
321{
322 if (BE2_chip(adapter)) {
323 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
324
325 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500326 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000327 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
328
329 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500330 } else {
331 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
332
333 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000334 }
335}
336
337/* BE2 supports only v0 cmd */
338static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
339{
340 if (BE2_chip(adapter)) {
341 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
342
343 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500344 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000345 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
346
347 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500348 } else {
349 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
350
351 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000352 }
353}
354
/* Copy the v0 (BE2) hardware stats from the LE DMA response buffer into
 * the driver's CPU-endian drv_stats, selecting the per-port counters
 * for this function's port.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* convert the whole response from LE to host order in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filter drops separately; fold them */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters live in the rxf section, one per port */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
403
/* Copy the v1 (BE3) hardware stats from the LE DMA response buffer into
 * the driver's CPU-endian drv_stats, selecting the per-port counters
 * for this function's port.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* convert the whole response from LE to host order in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
449
/* Copy the v2 hardware stats from the LE DMA response buffer into the
 * driver's CPU-endian drv_stats, selecting the per-port counters for
 * this function's port.  Also copies RoCE counters when supported.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* convert the whole response from LE to host order in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
503
/* Copy the Lancer pport stats from the LE DMA response buffer into the
 * driver's CPU-endian drv_stats.  Lancer exposes 64-bit counters; only
 * the low words (_lo) are consumed for several of them.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	/* convert the whole response from LE to host order in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold separate address and vlan filter drops into one counter */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000540
Sathya Perla09c1c682011-08-22 19:41:53 +0000541static void accumulate_16bit_val(u32 *acc, u16 val)
542{
543#define lo(x) (x & 0xFFFF)
544#define hi(x) (x & 0xFFFF0000)
545 bool wrapped = val < lo(*acc);
546 u32 newacc = hi(*acc) + val;
547
548 if (wrapped)
549 newacc += 65536;
550 ACCESS_ONCE(*acc) = newacc;
551}
552
Jingoo Han4188e7d2013-08-05 18:02:02 +0900553static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530554 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000555{
556 if (!BEx_chip(adapter))
557 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
558 else
559 /* below erx HW counter can actually wrap around after
560 * 65535. Driver accumulates a 32-bit value
561 */
562 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
563 (u16)erx_stat);
564}
565
/* Parse the firmware stats response into driver counters, dispatching
 * to the populate_* helper that matches this chip generation, then
 * update the per-RX-queue erx drop counters (non-Lancer only).
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
591
/* ndo_get_stats64() handler: aggregates the per-queue SW counters and the
 * adapter-wide HW error counters (drv_stats) into @stats.
 * Returns @stats for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* u64_stats seqcount retry loop: re-read until a consistent
		 * pkts/bytes snapshot is obtained (u64 reads can tear on
		 * 32-bit arches while the datapath is updating them)
		 */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
659
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700661{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662 struct net_device *netdev = adapter->netdev;
663
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000664 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000665 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000666 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700667 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000668
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530669 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000670 netif_carrier_on(netdev);
671 else
672 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200673
674 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700675}
676
/* Account one transmitted skb in the per-TXQ SW stats */
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	/* A GSO skb counts as gso_segs wire packets; a non-GSO skb as one */
	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += tx_pkts;
	/* encapsulated pkt with inner csum offload => VXLAN-offloaded TX */
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}
690
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500691/* Returns number of WRBs needed for the skb */
692static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500694 /* +1 for the header wrb */
695 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700696}
697
698static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
699{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500700 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
701 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
702 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
703 wrb->rsvd0 = 0;
704}
705
706/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
707 * to avoid the swap and shift/mask operations in wrb_fill().
708 */
709static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
710{
711 wrb->frag_pa_hi = 0;
712 wrb->frag_pa_lo = 0;
713 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000714 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700715}
716
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000717static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530718 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000719{
720 u8 vlan_prio;
721 u16 vlan_tag;
722
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100723 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000724 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
725 /* If vlan priority provided by OS is NOT in available bmap */
726 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
727 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
728 adapter->recommended_prio;
729
730 return vlan_tag;
731}
732
Sathya Perlac9c47142014-03-27 10:46:19 +0530733/* Used only for IP tunnel packets */
734static u16 skb_inner_ip_proto(struct sk_buff *skb)
735{
736 return (inner_ip_hdr(skb)->version == 4) ?
737 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
738}
739
740static u16 skb_ip_proto(struct sk_buff *skb)
741{
742 return (ip_hdr(skb)->version == 4) ?
743 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
744}
745
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530746static inline bool be_is_txq_full(struct be_tx_obj *txo)
747{
748 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
749}
750
751static inline bool be_can_txq_wake(struct be_tx_obj *txo)
752{
753 return atomic_read(&txo->q.used) < txo->q.len / 2;
754}
755
756static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
757{
758 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
759}
760
/* Inspect the skb and record the TX offload features (LSO, checksum,
 * VLAN) to be requested of the HW in @wrb_params.
 */
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		/* non-Lancer chips need the explicit LSO6 hint for ipv6 TSO */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			/* tunnelled pkt: use the inner L4 protocol and ask
			 * for inner IP csum as well
			 */
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	/* CRC is always requested */
	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500792
/* Translate the collected wrb_params into the HW's TX header-WRB layout.
 * The hdr is still in CPU byte-order here; the caller converts it to LE
 * (be_dws_cpu_to_le) before handing it to HW.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum offload requests */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* segmentation offload */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	/* mgmt bit routes a copy of the pkt to the BMC (OS2BMC) */
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
829
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000830static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530831 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000832{
833 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500834 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000835
Sathya Perla7101e112010-03-22 20:41:12 +0000836
Sathya Perlaf986afc2015-02-06 08:18:43 -0500837 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
838 (u64)le32_to_cpu(wrb->frag_pa_lo);
839 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000840 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500841 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000842 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500843 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000844 }
845}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700846
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530847/* Grab a WRB header for xmit */
848static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700849{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530850 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700851
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530852 queue_head_inc(&txo->q);
853 return head;
854}
855
/* Set up the WRB header for xmit: fill the header WRB reserved at @head,
 * convert it to LE, remember the skb for completion processing and
 * account all of the packet's WRBs in the queue state.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* HW expects the hdr in LE format */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* slot must be free; skb is looked up by head on TX completion */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700876
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530877/* Setup a WRB fragment (buffer descriptor) for xmit */
878static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
879 int len)
880{
881 struct be_eth_wrb *wrb;
882 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700883
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530884 wrb = queue_head_node(txq);
885 wrb_fill(wrb, busaddr, len);
886 queue_head_inc(txq);
887}
888
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind the producer index to this packet's hdr WRB so the walk
	 * below visits the packet's frag WRBs in order
	 */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first frag can have been dma_map_single()ed */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* the walk advanced head again; restore it once more */
	txq->head = head;
}
916
917/* Enqueue the given packet for transmit. This routine allocates WRBs for the
918 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
919 * of WRBs used up by the packet.
920 */
921static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
922 struct sk_buff *skb,
923 struct be_wrb_params *wrb_params)
924{
925 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
926 struct device *dev = &adapter->pdev->dev;
927 struct be_queue_info *txq = &txo->q;
928 bool map_single = false;
929 u16 head = txq->head;
930 dma_addr_t busaddr;
931 int len;
932
933 head = be_tx_get_wrb_hdr(txo);
934
935 if (skb->len > skb->data_len) {
936 len = skb_headlen(skb);
937
938 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
939 if (dma_mapping_error(dev, busaddr))
940 goto dma_err;
941 map_single = true;
942 be_tx_setup_wrb_frag(txo, busaddr, len);
943 copied += len;
944 }
945
946 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
947 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
948 len = skb_frag_size(frag);
949
950 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
951 if (dma_mapping_error(dev, busaddr))
952 goto dma_err;
953 be_tx_setup_wrb_frag(txo, busaddr, len);
954 copied += len;
955 }
956
957 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
958
959 be_tx_stats_update(txo, skb);
960 return wrb_cnt;
961
962dma_err:
963 adapter->drv_stats.dma_map_errors++;
964 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000965 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700966}
967
/* True once the QnQ (QinQ) async event has been received from the f/w */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
972
Somnath Kotur93040ae2012-06-26 22:32:10 +0000973static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000974 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530975 struct be_wrb_params
976 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000977{
978 u16 vlan_tag = 0;
979
980 skb = skb_share_check(skb, GFP_ATOMIC);
981 if (unlikely(!skb))
982 return skb;
983
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100984 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000985 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530986
987 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
988 if (!vlan_tag)
989 vlan_tag = adapter->pvid;
990 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
991 * skip VLAN insertion
992 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530993 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530994 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000995
996 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +0100997 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
998 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000999 if (unlikely(!skb))
1000 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001001 skb->vlan_tci = 0;
1002 }
1003
1004 /* Insert the outer VLAN, if any */
1005 if (adapter->qnq_vid) {
1006 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001007 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1008 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001009 if (unlikely(!skb))
1010 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301011 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001012 }
1013
Somnath Kotur93040ae2012-06-26 22:32:10 +00001014 return skb;
1015}
1016
/* Returns true for an IPv6 pkt whose first header after the fixed IPv6
 * header is neither TCP nor UDP and looks like an "offending" extension
 * header (its 2nd byte, hdrlen, is 0xff) - see the BE3 TX-stall errata
 * handling in be_ipv6_tx_stall_chk().
 * NOTE(review): assumes the ethernet and IPv6 headers are in the linear
 * part of the skb (raw skb->data pointer arithmetic) - confirm callers
 * guarantee this.
 */
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}
1038
1039static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1040{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001041 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001042}
1043
Sathya Perla748b5392014-05-09 13:29:13 +05301044static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001045{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001046 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001047}
1048
/* skb fixups needed on BE-x/Lancer chips before the pkt can safely be
 * given to HW. Returns the (possibly re-allocated) skb, or NULL when the
 * pkt was dropped or lost (the caller must not touch it then).
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim the pad bytes so skb->len matches IP tot_len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1117
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301118static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1119 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301120 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301121{
1122 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1123 * less may cause a transmit stall on that port. So the work-around is
1124 * to pad short packets (<= 32 bytes) to a 36-byte length.
1125 */
1126 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001127 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301128 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301129 }
1130
1131 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301132 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301133 if (!skb)
1134 return NULL;
1135 }
1136
1137 return skb;
1138}
1139
/* Ring the TX doorbell for all WRBs queued up since the last flush.
 * Ensures the last request will raise a TX completion event, and on
 * non-Lancer chips pads an odd WRB count with a dummy WRB (those chips
 * are notified in even WRB counts).
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* bump the last request's num_wrb field to cover the dummy */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1163
/* OS2BMC related */

/* Well-known UDP ports checked when deciding whether a bcast/mcast pkt
 * must also be passed through to the BMC (see be_send_pkt_to_bmc()).
 */
#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1	137
#define NET_BIOS_PORT2	138
#define DHCPV6_RAS_PORT	547

/* NOTE(review): as used by be_send_pkt_to_bmc(), the generic
 * multicast/broadcast bits in bmc_filt_mask suppress forwarding when set
 * (hence the negations below), while the protocol-specific bits enable
 * forwarding of that traffic class - confirm against the f/w spec.
 */
#define is_mc_allowed_on_bmc(adapter, eh) \
	(!is_multicast_filt_enabled(adapter) && \
	 is_multicast_ether_addr(eh->h_dest) && \
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh) \
	(!is_broadcast_filt_enabled(adapter) && \
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb) \
	(is_arp(skb) && is_arp_filt_enabled(adapter))

/* compare_ether_addr() returns 0 on match, hence the negation */
#define is_broadcast_packet(eh, adapter) \
	(is_multicast_ether_addr(eh->h_dest) && \
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & \
	 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1217
/* Decide whether a copy of this TX pkt must also be delivered to the
 * BMC (OS2BMC pass-through). Only mcast/bcast frames are candidates;
 * which classes pass is governed by adapter->bmc_filt_mask.
 * Returns true when the pkt must be duplicated to the BMC; in that case
 * *skb may be replaced (the VLAN tag is inlined for the BMC copy).
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	/* only mcast/bcast frames are ever forwarded to the BMC */
	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* ICMPv6 router/neighbour advertisements */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6-relay traffic, keyed on UDP dest port */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1287
/* ndo_start_xmit() handler: applies chip workarounds, enqueues the pkt
 * (a second time with the mgmt bit if a copy must go to the BMC), and
 * rings the TX doorbell when a flush is due (no more pkts pending via
 * xmit_more, or the subqueue just stopped).
 * Always returns NETDEV_TX_OK; on error the pkt is dropped and counted.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			/* extra ref: the skb now sits on the ring twice.
			 * NOTE(review): the ref is taken after the 2nd
			 * enqueue - verify the completion path cannot free
			 * the skb before skb_get() runs.
			 */
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1338
1339static int be_change_mtu(struct net_device *netdev, int new_mtu)
1340{
1341 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301342 struct device *dev = &adapter->pdev->dev;
1343
1344 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1345 dev_info(dev, "MTU must be between %d and %d bytes\n",
1346 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001347 return -EINVAL;
1348 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301349
1350 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301351 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001352 netdev->mtu = new_mtu;
1353 return 0;
1354}
1355
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001356static inline bool be_in_all_promisc(struct be_adapter *adapter)
1357{
1358 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1359 BE_IF_FLAGS_ALL_PROMISCUOUS;
1360}
1361
1362static int be_set_vlan_promisc(struct be_adapter *adapter)
1363{
1364 struct device *dev = &adapter->pdev->dev;
1365 int status;
1366
1367 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1368 return 0;
1369
1370 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1371 if (!status) {
1372 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1373 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1374 } else {
1375 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1376 }
1377 return status;
1378}
1379
1380static int be_clear_vlan_promisc(struct be_adapter *adapter)
1381{
1382 struct device *dev = &adapter->pdev->dev;
1383 int status;
1384
1385 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1386 if (!status) {
1387 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1388 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1389 }
1390 return status;
1391}
1392
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * Returns 0 on success or a FW command status on failure.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	/* More VIDs configured than HW filter slots: fall back to
	 * VLAN-promiscuous mode instead of filtering.
	 */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* Filter table programmed OK; promisc fallback not needed */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1428
Patrick McHardy80d5c362013-04-19 02:04:28 +00001429static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001430{
1431 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001432 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001433
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001434 /* Packets with VID 0 are always received by Lancer by default */
1435 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301436 return status;
1437
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301438 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301439 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001440
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301441 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301442 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001443
Somnath Kotura6b74e02014-01-21 15:50:55 +05301444 status = be_vid_config(adapter);
1445 if (status) {
1446 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301447 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301448 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301449
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001450 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001451}
1452
Patrick McHardy80d5c362013-04-19 02:04:28 +00001453static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001454{
1455 struct be_adapter *adapter = netdev_priv(netdev);
1456
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001457 /* Packets with VID 0 are always received by Lancer by default */
1458 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301459 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001460
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301461 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301462 adapter->vlans_added--;
1463
1464 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001465}
1466
/* Ask FW to turn off all promiscuous-mode RX filtering and clear the
 * corresponding bits in the cached if_flags. The FW status is
 * intentionally ignored (best effort); the cache is cleared regardless.
 */
static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1472
/* Ask FW to enable all promiscuous-mode RX filtering and mirror the
 * request in the cached if_flags. The FW status is intentionally
 * ignored (best effort); the cache is set regardless.
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1478
1479static void be_set_mc_promisc(struct be_adapter *adapter)
1480{
1481 int status;
1482
1483 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1484 return;
1485
1486 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1487 if (!status)
1488 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1489}
1490
1491static void be_set_mc_list(struct be_adapter *adapter)
1492{
1493 int status;
1494
1495 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1496 if (!status)
1497 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1498 else
1499 be_set_mc_promisc(adapter);
1500}
1501
1502static void be_set_uc_list(struct be_adapter *adapter)
1503{
1504 struct netdev_hw_addr *ha;
1505 int i = 1; /* First slot is claimed by the Primary MAC */
1506
1507 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
1508 be_cmd_pmac_del(adapter, adapter->if_handle,
1509 adapter->pmac_id[i], 0);
1510
1511 if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
1512 be_set_all_promisc(adapter);
1513 return;
1514 }
1515
1516 netdev_for_each_uc_addr(ha, adapter->netdev) {
1517 adapter->uc_macs++; /* First slot is for Primary MAC */
1518 be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
1519 &adapter->pmac_id[adapter->uc_macs], 0);
1520 }
1521}
1522
1523static void be_clear_uc_list(struct be_adapter *adapter)
1524{
1525 int i;
1526
1527 for (i = 1; i < (adapter->uc_macs + 1); i++)
1528 be_cmd_pmac_del(adapter, adapter->if_handle,
1529 adapter->pmac_id[i], 0);
1530 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301531}
1532
Sathya Perlaa54769f2011-10-24 02:45:00 +00001533static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001534{
1535 struct be_adapter *adapter = netdev_priv(netdev);
1536
1537 if (netdev->flags & IFF_PROMISC) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001538 be_set_all_promisc(adapter);
1539 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001540 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001541
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001542 /* Interface was previously in promiscuous mode; disable it */
1543 if (be_in_all_promisc(adapter)) {
1544 be_clear_all_promisc(adapter);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001545 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001546 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001547 }
1548
Sathya Perlae7b909a2009-11-22 22:01:10 +00001549 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001550 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001551 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1552 be_set_mc_promisc(adapter);
Kalesh APa0794882014-05-30 19:06:23 +05301553 return;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001554 }
Kalesh APa0794882014-05-30 19:06:23 +05301555
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001556 if (netdev_uc_count(netdev) != adapter->uc_macs)
1557 be_set_uc_list(adapter);
1558
1559 be_set_mc_list(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001560}
1561
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001562static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1563{
1564 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001565 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001566 int status;
1567
Sathya Perla11ac75e2011-12-13 00:58:50 +00001568 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001569 return -EPERM;
1570
Sathya Perla11ac75e2011-12-13 00:58:50 +00001571 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001572 return -EINVAL;
1573
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301574 /* Proceed further only if user provided MAC is different
1575 * from active MAC
1576 */
1577 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1578 return 0;
1579
Sathya Perla3175d8c2013-07-23 15:25:03 +05301580 if (BEx_chip(adapter)) {
1581 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1582 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001583
Sathya Perla11ac75e2011-12-13 00:58:50 +00001584 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1585 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301586 } else {
1587 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1588 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001589 }
1590
Kalesh APabccf232014-07-17 16:20:24 +05301591 if (status) {
1592 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1593 mac, vf, status);
1594 return be_cmd_status(status);
1595 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001596
Kalesh APabccf232014-07-17 16:20:24 +05301597 ether_addr_copy(vf_cfg->mac_addr, mac);
1598
1599 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001600}
1601
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001602static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301603 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001604{
1605 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001606 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001607
Sathya Perla11ac75e2011-12-13 00:58:50 +00001608 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001609 return -EPERM;
1610
Sathya Perla11ac75e2011-12-13 00:58:50 +00001611 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001612 return -EINVAL;
1613
1614 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001615 vi->max_tx_rate = vf_cfg->tx_rate;
1616 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001617 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1618 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001619 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301620 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001621 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001622
1623 return 0;
1624}
1625
/* Enable Transparent VLAN Tagging (TVT) with @vlan on VF @vf: clear any
 * guest-programmed VLAN filters and revoke the VF's filter-management
 * privilege so the guest cannot interfere with the tag.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	/* NOTE(review): failures of the filter-clear / privilege steps are
	 * deliberately not propagated — TVT itself is already enabled.
	 */
	return 0;
}
1654
/* Disable Transparent VLAN Tagging on VF @vf and hand back the VF's
 * privilege to manage its own VLAN filters.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	/* NOTE(review): a privilege-restore failure is deliberately not
	 * propagated — TVT itself has already been reset.
	 */
	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1681
Sathya Perla748b5392014-05-09 13:29:13 +05301682static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001683{
1684 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001685 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001686 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001687
Sathya Perla11ac75e2011-12-13 00:58:50 +00001688 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001689 return -EPERM;
1690
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001691 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001692 return -EINVAL;
1693
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001694 if (vlan || qos) {
1695 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001696 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001697 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001698 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001699 }
1700
Kalesh APabccf232014-07-17 16:20:24 +05301701 if (status) {
1702 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001703 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1704 status);
Kalesh APabccf232014-07-17 16:20:24 +05301705 return be_cmd_status(status);
1706 }
1707
1708 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301709 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001710}
1711
/* ndo_set_vf_rate handler: cap VF @vf's TX rate at @max_tx_rate Mbps.
 * A max_tx_rate of 0 removes the cap; min_tx_rate is not supported by
 * this HW and must be 0. Non-zero rates are validated against the
 * current link speed before being sent to FW.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 clears the limit; skip the link-speed validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the value reported back by ndo_get_vf_config */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301773
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301774static int be_set_vf_link_state(struct net_device *netdev, int vf,
1775 int link_state)
1776{
1777 struct be_adapter *adapter = netdev_priv(netdev);
1778 int status;
1779
1780 if (!sriov_enabled(adapter))
1781 return -EPERM;
1782
1783 if (vf >= adapter->num_vfs)
1784 return -EINVAL;
1785
1786 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301787 if (status) {
1788 dev_err(&adapter->pdev->dev,
1789 "Link state change on VF %d failed: %#x\n", vf, status);
1790 return be_cmd_status(status);
1791 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301792
Kalesh APabccf232014-07-17 16:20:24 +05301793 adapter->vf_cfg[vf].plink_tracking = link_state;
1794
1795 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301796}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001797
Kalesh APe7bcbd72015-05-06 05:30:32 -04001798static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1799{
1800 struct be_adapter *adapter = netdev_priv(netdev);
1801 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1802 u8 spoofchk;
1803 int status;
1804
1805 if (!sriov_enabled(adapter))
1806 return -EPERM;
1807
1808 if (vf >= adapter->num_vfs)
1809 return -EINVAL;
1810
1811 if (BEx_chip(adapter))
1812 return -EOPNOTSUPP;
1813
1814 if (enable == vf_cfg->spoofchk)
1815 return 0;
1816
1817 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1818
1819 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1820 0, spoofchk);
1821 if (status) {
1822 dev_err(&adapter->pdev->dev,
1823 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1824 return be_cmd_status(status);
1825 }
1826
1827 vf_cfg->spoofchk = enable;
1828 return 0;
1829}
1830
Sathya Perla2632baf2013-10-01 16:00:00 +05301831static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1832 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001833{
Sathya Perla2632baf2013-10-01 16:00:00 +05301834 aic->rx_pkts_prev = rx_pkts;
1835 aic->tx_reqs_prev = tx_pkts;
1836 aic->jiffies = now;
1837}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001838
/* Compute a new Event Queue Delay (EQD, interrupt coalescing value) for
 * @eqo from the combined RX/TX packet rate observed since the previous
 * AIC snapshot. Falls back to the static ethtool-configured EQD when
 * adaptive coalescing is disabled.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		/* Adaptive mode off: reset the snapshot and use the
		 * ethtool-set static value.
		 */
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum pkt counts over all RX/TX queues serviced by this EQ; the
	 * u64_stats retry loops give consistent 64-bit reads even on
	 * 32-bit hosts.
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	/* Sampling interval in msecs; avoid div-by-zero on a 0ms interval */
	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	/* Combined RX+TX packets per second over the interval */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	/* Scale pps to a delay: (pps / 15000) * 4 */
	eqd = (pps / 15000) << 2;

	/* Very low rates get no coalescing at all */
	if (eqd < 8)
		eqd = 0;
	/* Clamp to the user-configured adaptive range */
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
1899
1900/* For Skyhawk-R only */
1901static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1902{
1903 struct be_adapter *adapter = eqo->adapter;
1904 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1905 ulong now = jiffies;
1906 int eqd;
1907 u32 mult_enc;
1908
1909 if (!aic->enable)
1910 return 0;
1911
1912 if (time_before_eq(now, aic->jiffies) ||
1913 jiffies_to_msecs(now - aic->jiffies) < 1)
1914 eqd = aic->prev_eqd;
1915 else
1916 eqd = be_get_new_eqd(eqo);
1917
1918 if (eqd > 100)
1919 mult_enc = R2I_DLY_ENC_1;
1920 else if (eqd > 60)
1921 mult_enc = R2I_DLY_ENC_2;
1922 else if (eqd > 20)
1923 mult_enc = R2I_DLY_ENC_3;
1924 else
1925 mult_enc = R2I_DLY_ENC_0;
1926
1927 aic->prev_eqd = eqd;
1928
1929 return mult_enc;
1930}
1931
1932void be_eqd_update(struct be_adapter *adapter, bool force_update)
1933{
1934 struct be_set_eqd set_eqd[MAX_EVT_QS];
1935 struct be_aic_obj *aic;
1936 struct be_eq_obj *eqo;
1937 int i, num = 0, eqd;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001938
Sathya Perla2632baf2013-10-01 16:00:00 +05301939 for_all_evt_queues(adapter, eqo, i) {
1940 aic = &adapter->aic_obj[eqo->idx];
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001941 eqd = be_get_new_eqd(eqo);
1942 if (force_update || eqd != aic->prev_eqd) {
Sathya Perla2632baf2013-10-01 16:00:00 +05301943 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1944 set_eqd[num].eq_id = eqo->q.id;
1945 aic->prev_eqd = eqd;
1946 num++;
1947 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001948 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301949
1950 if (num)
1951 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001952}
1953
Sathya Perla3abcded2010-10-03 22:12:27 -07001954static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301955 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001956{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001957 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001958
Sathya Perlaab1594e2011-07-25 19:10:15 +00001959 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001960 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001961 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001962 stats->rx_pkts++;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +05301963 if (rxcp->tunneled)
1964 stats->rx_vxlan_offload_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001965 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001966 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001967 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001968 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001969 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001970}
1971
Sathya Perla2e588f82011-03-11 02:49:26 +00001972static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001973{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001974 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301975 * Also ignore ipcksm for ipv6 pkts
1976 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001977 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301978 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001979}
1980
/* Pop the RX frag at the queue tail and make its data CPU-visible.
 * The (compound) page backing multiple frags is DMA-unmapped only when
 * its last frag is consumed; earlier frags are just synced for CPU
 * access. The caller owns the returned page reference.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	/* A missing page here means the producer/consumer got out of sync */
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag of the page: tear down the whole DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Intermediate frag: only sync this frag's bytes to the CPU */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2006
2007/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002008static void be_rx_compl_discard(struct be_rx_obj *rxo,
2009 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002010{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002011 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002012 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002013
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002014 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302015 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002016 put_page(page_info->page);
2017 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002018 }
2019}
2020
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first frag is copied (fully for tiny packets, header-only otherwise)
 * into the skb linear area; any remaining frags are attached as page frags.
 * Consumes rxcp->num_rcvd posted RX page_info entries from rxo.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header into the linear area; the
		 * payload stays in the RX page, referenced as frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page (if any) moved to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-frag packet: HW must have reported exactly one frag */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref taken
			 * when the frag was posted; the slot already holds one.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2095
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb: count the drop and release the posted RX frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust HW checksum only if the device offload is enabled and
	 * the completion flags say the csum checks passed.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled packets the HW-verified csum covers the inner header */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2131
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* Can't get a GRO skb: release the posted RX frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* NOTE: j is u16, so j = -1 wraps to 0xffff and the j++ on the
	 * first (i == 0) iteration brings it to 0 - relied-upon behavior.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop its extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only when HW validated the csum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled packets the HW-verified csum covers the inner header */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2189
/* Decode a v1 (BE3-native) RX completion entry into the chip-agnostic
 * be_rx_compl_info form used by the rest of the RX path.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002212
/* Decode a v0 (legacy) RX completion entry into the chip-agnostic
 * be_rx_compl_info form. Unlike v1, v0 reports ip_frag and has no
 * tunneled bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
2234
/* Fetch, validate and parse the next RX completion from rxo's CQ.
 * Returns NULL when no valid completion is pending; otherwise returns
 * rxo->rxcp filled in and advances the CQ tail.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit load before loading the rest of the entry */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 csum is not valid for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* Lancer reports the tag in CPU order; older chips do not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the PVID tag from the stack unless the VID was
		 * explicitly configured on the interface.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2279
Eric Dumazet1829b082011-03-01 05:48:12 +00002280static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002281{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002282 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002283
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002284 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002285 gfp |= __GFP_COMP;
2286 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002287}
2288
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * Posts up to @frags_needed descriptors, stopping early if the RXQ slot is
 * still occupied, page allocation fails, or DMA mapping fails. Rings the
 * doorbell for whatever was actually posted.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new big page: allocate and DMA-map it */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next frag out of the current big page;
			 * each posted frag holds its own page reference.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Notify HW in chunks bounded by MAX_NUM_POST_ERX_DB */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2371
/* Fetch and parse the next TX completion from txo's CQ.
 * Returns NULL when no valid completion is pending; otherwise returns
 * txo->txcp filled in and advances the CQ tail.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Reset the valid bit so this entry is not parsed again */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2392
/* Walk the TXQ from tail up to and including @last_index, unmapping each
 * WRB and freeing the skbs that were sent. A non-NULL sent_skbs[] entry
 * marks the header WRB of a request. Returns the number of WRBs processed;
 * the caller adjusts txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first frag after a hdr wrb also covers the skb's
		 * linear header area, if any.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2426
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002427/* Return the number of events in the event queue */
2428static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002429{
2430 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002431 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002432
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002433 do {
2434 eqe = queue_tail_node(&eqo->q);
2435 if (eqe->evt == 0)
2436 break;
2437
2438 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002439 eqe->evt = 0;
2440 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002441 queue_tail_inc(&eqo->q);
2442 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002443
2444 return num;
2445}
2446
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002447/* Leaves the EQ is disarmed state */
2448static void be_eq_clean(struct be_eq_obj *eqo)
2449{
2450 int num = events_get(eqo);
2451
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002452 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002453}
2454
/* Drain rxo's completion queue and free all posted-but-unused RX buffers.
 * Called on queue teardown; leaves the CQ unarmed and the RXQ indices reset.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms or on an unrecoverable HW error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2506
/* Drain all TX completions on queue teardown, then reclaim any requests
 * that were enqueued but never notified to the HW.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart the silence timer */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2572
/* Tear down all event queues: drain events, destroy the HW queue, remove
 * the NAPI context, and free per-EQ resources.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		/* These were allocated in be_evt_queues_create() even for
		 * EQs that failed creation, so free them unconditionally.
		 */
		free_cpumask_var(eqo->affinity_mask);
		be_queue_free(adapter, &eqo->q);
	}
}
2589
/* Create one event queue per interrupt (capped by the configured queue
 * count), each with its own NAPI context, IRQ affinity mask and adaptive
 * interrupt-coalescing (AIC) state. Returns 0 or a negative errno; on
 * failure, partially created EQs are left for be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);
		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Spread EQs across CPUs local to the device's NUMA node */
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2627
/* Destroy the MCC (mailbox command) queue and its completion queue.
 * The MCCQ is torn down before its CQ, the reverse of creation order.
 * be_queue_free() is safe on queues that were never created.
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
2642
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue first, then the MCC queue on top of it.
 * On any failure the partially created resources are unwound in reverse
 * order via the goto chain. Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2675
/* Destroy every TX queue and its companion completion queue.
 * Queues that were never created are only freed, not destroyed in FW.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2694
/* Create all TX queues: for each TXQ, allocate and create its completion
 * queue (bound to an EQ), then the TXQ itself, and finally program the
 * XPS map so transmitting CPUs line up with the EQ's affinity mask.
 * Returns 0 on success or the first failing status.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2739
/* Destroy the completion queue of every RX object (the RX queues
 * themselves are torn down separately by be_rx_qs_destroy()).
 */
static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2753
/* Decide how many RX queues to use (RSS rings plus an optional default
 * RXQ) and create a completion queue for each, distributing them across
 * the available EQs round-robin. Returns 0 or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Spread RX CQs across the EQs round-robin */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2795
/* Legacy INTx interrupt handler: counts pending EQ events, schedules NAPI
 * and re-notifies the EQ without re-arming it. Tracks spurious interrupts
 * so only the first one after a valid interrupt reports IRQ_HANDLED.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2827
/* MSI-x interrupt handler: acknowledge the EQ (without re-arming) and
 * hand processing over to NAPI. One vector per EQ, so always handled.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2836
Sathya Perla2e588f82011-03-11 02:49:26 +00002837static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002838{
Somnath Koture38b1702013-05-29 22:55:56 +00002839 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002840}
2841
/* Drain up to @budget RX completions from @rxo: discard flush/partial/
 * mis-filtered completions, deliver the rest via GRO or the regular path,
 * then notify the CQ and replenish RX frags if the queue is running low.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2901
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302902static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302903{
2904 switch (status) {
2905 case BE_TX_COMP_HDR_PARSE_ERR:
2906 tx_stats(txo)->tx_hdr_parse_err++;
2907 break;
2908 case BE_TX_COMP_NDMA_ERR:
2909 tx_stats(txo)->tx_dma_err++;
2910 break;
2911 case BE_TX_COMP_ACL_ERR:
2912 tx_stats(txo)->tx_spoof_check_err++;
2913 break;
2914 }
2915}
2916
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302917static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302918{
2919 switch (status) {
2920 case LANCER_TX_COMP_LSO_ERR:
2921 tx_stats(txo)->tx_tso_err++;
2922 break;
2923 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2924 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2925 tx_stats(txo)->tx_spoof_check_err++;
2926 break;
2927 case LANCER_TX_COMP_QINQ_ERR:
2928 tx_stats(txo)->tx_qinq_err++;
2929 break;
2930 case LANCER_TX_COMP_PARITY_ERR:
2931 tx_stats(txo)->tx_internal_parity_err++;
2932 break;
2933 case LANCER_TX_COMP_DMA_ERR:
2934 tx_stats(txo)->tx_dma_err++;
2935 break;
2936 }
2937}
2938
/* Reap TX completions for one TXQ: free the transmitted wrbs, record any
 * chip-reported errors, notify the CQ, and wake the netdev subqueue if it
 * was stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			/* Error stat layout differs between Lancer and BEx */
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.
		 */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002973
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll locking: NAPI and busy-poll contexts race for the same EQ, so
 * a small state machine (eqo->state / eqo->lock) arbitrates between them.
 * Try to claim the EQ for NAPI; returns false if busy-poll holds it, in
 * which case a yield is recorded so the holder knows NAPI was blocked.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ held by NAPI and return it to the idle state */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-polling; returns false if NAPI holds it */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ held by busy-poll and return it to the idle state */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the per-EQ busy-poll lock/state */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until any in-flight busy-poll on this EQ has drained */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Without busy-poll support, NAPI never contends: stub out the locking */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3073
/* NAPI poll handler for one EQ: reap TX completions, process RX (when the
 * EQ isn't claimed by busy-poll), service MCC on the MCC EQ, and either
 * complete + re-arm the EQ or keep it unarmed for another poll round.
 * Returns the RX work done (or the full budget to stay in polling mode).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the RXQs; claim the whole budget so NAPI
		 * is re-invoked instead of completing.
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3122
#ifdef CONFIG_NET_RX_BUSY_POLL
/* ndo_busy_poll handler: process a few RX completions on the caller's CPU
 * if the EQ isn't owned by NAPI. Returns LL_FLUSH_BUSY when NAPI holds
 * the EQ, otherwise the number of packets processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		/* Small fixed budget; stop at the first RXQ with work */
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
3144
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003145void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00003146{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003147 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3148 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003149 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303150 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003151
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303152 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perla72f02482011-11-10 19:17:58 +00003153 return;
3154
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003155 if (lancer_chip(adapter)) {
3156 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3157 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303158 be_set_error(adapter, BE_ERROR_UE);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003159 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303160 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003161 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303162 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303163 /* Do not log error messages if its a FW reset */
3164 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3165 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3166 dev_info(dev, "Firmware update in progress\n");
3167 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303168 dev_err(dev, "Error detected in the card\n");
3169 dev_err(dev, "ERR: sliport status 0x%x\n",
3170 sliport_status);
3171 dev_err(dev, "ERR: sliport error1 0x%x\n",
3172 sliport_err1);
3173 dev_err(dev, "ERR: sliport error2 0x%x\n",
3174 sliport_err2);
3175 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003176 }
3177 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04003178 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3179 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3180 ue_lo_mask = ioread32(adapter->pcicfg +
3181 PCICFG_UE_STATUS_LOW_MASK);
3182 ue_hi_mask = ioread32(adapter->pcicfg +
3183 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003184
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003185 ue_lo = (ue_lo & ~ue_lo_mask);
3186 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003187
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303188 /* On certain platforms BE hardware can indicate spurious UEs.
3189 * Allow HW to stop working completely in case of a real UE.
3190 * Hence not setting the hw_error for UE detection.
3191 */
3192
3193 if (ue_lo || ue_hi) {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303194 dev_err(dev,
3195 "Unrecoverable Error detected in the adapter");
3196 dev_err(dev, "Please reboot server to recover");
3197 if (skyhawk_chip(adapter))
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303198 be_set_error(adapter, BE_ERROR_UE);
3199
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303200 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3201 if (ue_lo & 1)
3202 dev_err(dev, "UE: %s bit set\n",
3203 ue_status_low_desc[i]);
3204 }
3205 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3206 if (ue_hi & 1)
3207 dev_err(dev, "UE: %s bit set\n",
3208 ue_status_hi_desc[i]);
3209 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05303210 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003211 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00003212}
3213
Sathya Perla8d56ff12009-11-22 22:02:26 +00003214static void be_msix_disable(struct be_adapter *adapter)
3215{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003216 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003217 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003218 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303219 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003220 }
3221}
3222
/* Enable MSI-x, requesting enough vectors for NIC queues and (when
 * supported) RoCE; on success the granted vectors are split between the
 * two. Returns 0, or a negative error only for VFs (PFs fall back to
 * INTx).
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Give half of the granted vectors to RoCE, the rest to the NIC */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3266
/* Return the MSI-x vector (IRQ number) assigned to the given EQ */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
3272
/* Request one MSI-x IRQ per EQ (named "<netdev>-q<i>") and apply the
 * EQ's CPU affinity hint. On failure, frees the IRQs registered so far
 * and disables MSI-x entirely before returning the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* Unwind the IRQs already registered, in reverse order */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3298
/* Register interrupt handlers: prefer MSI-x; PFs fall back to a shared
 * INTx line (serviced via EQ0) if MSI-x registration fails, while VFs
 * must fail since INTx is unsupported there. Sets isr_registered on
 * success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3326
/* Free the registered IRQs (the shared INTx line, or one MSI-x vector per
 * EQ with its affinity hint cleared) and mark the ISRs as unregistered.
 * No-op if be_irq_register() never succeeded.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}
3352
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003353static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003354{
3355 struct be_queue_info *q;
3356 struct be_rx_obj *rxo;
3357 int i;
3358
3359 for_all_rx_queues(adapter, rxo, i) {
3360 q = &rxo->q;
3361 if (q->created) {
3362 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003363 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003364 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003365 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003366 }
3367}
3368
/* Quiesce and tear down the data path. The ordering is deliberate:
 * NAPI/busy-poll off first, then async MCC events, then TX drained,
 * then RX rings destroyed, then each EQ's IRQ synchronized and the EQ
 * cleaned, and only then are the IRQs unregistered.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	be_clear_uc_list(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no in-flight interrupt handler still touches
		 * the EQ before cleaning it
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3414
/* Allocate and create all RX rings, program the RSS indirection table and
 * hash key (when more than one RX queue exists), and post the initial RX
 * buffers. Returns 0 on success or an error code from queue/FW setup.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Create the default (non-RSS) RXQ only when it is needed or when
	 * there are no RSS queues at all
	 */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by striping the rss_ids of all
		 * RSS queues across it
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is enabled only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		/* Keep soft state consistent with HW on failure */
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3484
/* ndo_open: bring up the data path. Creates RX rings, registers IRQs,
 * arms the RX/TX completion queues, enables async MCC, NAPI and the EQs,
 * queries link state and finally starts the TX queues. On any failure the
 * partial bring-up is undone via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Re-learn VXLAN ports so RX tunnel offload can be re-programmed */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3534
/* Enable or disable Wake-on-LAN (magic packet) in the adapter.
 * When enabling, the PM control bits are set in PCI config space and a
 * zeroed MAC is passed to FW; when disabling, the netdev's current MAC is
 * used. Returns 0 or a negative error.
 * NOTE(review): the result of be_cmd_enable_magic_wol() is not checked
 * before calling pci_enable_wake() — presumably deliberate best-effort,
 * but worth confirming against FW behavior.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem cmd;
	u8 mac[ETH_ALEN];
	int status;

	eth_zero_addr(mac);

	/* DMA-able buffer for the magic-WOL FW command */
	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(dev, "Could not enable Wake-on-lan\n");
			goto err;
		}
	} else {
		ether_addr_copy(mac, adapter->netdev->dev_addr);
	}

	status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
	pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
	pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
err:
	dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3568
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003569static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3570{
3571 u32 addr;
3572
3573 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3574
3575 mac[5] = (u8)(addr & 0xFF);
3576 mac[4] = (u8)((addr >> 8) & 0xFF);
3577 mac[3] = (u8)((addr >> 16) & 0xFF);
3578 /* Use the OUI from the current MAC address */
3579 memcpy(mac, adapter->netdev->dev_addr, 3);
3580}
3581
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for the VFs are assigned incrementally starting from the
 * seed. These addresses are programmed in the ASIC by the PF, and each VF
 * driver queries for its MAC address during its probe.
 */
Sathya Perla4c876612013-02-03 20:30:11 +00003588static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003589{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003590 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003591 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003592 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003593 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003594
3595 be_vf_eth_addr_generate(adapter, mac);
3596
Sathya Perla11ac75e2011-12-13 00:58:50 +00003597 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303598 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003599 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003600 vf_cfg->if_handle,
3601 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303602 else
3603 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3604 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003605
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003606 if (status)
3607 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303608 "Mac address assignment failed for VF %d\n",
3609 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003610 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003611 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003612
3613 mac[5] += 1;
3614 }
3615 return status;
3616}
3617
Sathya Perla4c876612013-02-03 20:30:11 +00003618static int be_vfs_mac_query(struct be_adapter *adapter)
3619{
3620 int status, vf;
3621 u8 mac[ETH_ALEN];
3622 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003623
3624 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303625 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3626 mac, vf_cfg->if_handle,
3627 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003628 if (status)
3629 return status;
3630 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3631 }
3632 return 0;
3633}
3634
/* Undo be_vf_setup(): disable SR-IOV and destroy the per-VF MAC filters
 * and interfaces. When VFs are still assigned to VMs only the driver's
 * soft state is freed, so live guests are not disturbed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx removes the pmac entry; newer chips clear the MAC */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3663
/* Destroy all queue objects in dependency order: MCC first, then RX CQs,
 * TX queues and finally the event queues they were bound to.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3671
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303672static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003673{
Sathya Perla191eb752012-02-23 18:50:13 +00003674 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3675 cancel_delayed_work_sync(&adapter->work);
3676 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3677 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303678}
3679
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003680static void be_cancel_err_detection(struct be_adapter *adapter)
3681{
3682 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3683 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3684 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3685 }
3686}
3687
Somnath Koturb05004a2013-12-05 12:08:16 +05303688static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303689{
Somnath Koturb05004a2013-12-05 12:08:16 +05303690 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003691 be_cmd_pmac_del(adapter, adapter->if_handle,
3692 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303693 kfree(adapter->pmac_id);
3694 adapter->pmac_id = NULL;
3695 }
3696}
3697
#ifdef CONFIG_BE2NET_VXLAN
/* Turn off VXLAN offloads: convert the tunnel interface back to normal,
 * clear the FW-programmed VXLAN port, and strip the tunnel-related
 * features from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	/* Drop the tunnel segmentation features from the netdev */
	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303718
/* Compute how many queue pairs each VF should get when num_vfs VFs are
 * enabled, dividing the pooled RSS queues between the PF and its VFs.
 * Returns at least 1.
 * NOTE(review): when num_vfs < be_max_vfs() - 8, res.max_rss_qs - 8 is
 * assumed to be positive; confirm max_rss_qs >= 8 holds for all profiles
 * to rule out u16 underflow in the division.
 */
static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
{
	struct be_resources res = adapter->pool_res;
	u16 num_vf_qs = 1;

	/* Distribute the queue resources equally among the PF and it's VFs
	 * Do not distribute queue resources in multi-channel configuration.
	 */
	if (num_vfs && !be_is_mc(adapter)) {
		/* If number of VFs requested is 8 less than max supported,
		 * assign 8 queue pairs to the PF and divide the remaining
		 * resources evenly among the VFs
		 */
		if (num_vfs < (be_max_vfs(adapter) - 8))
			num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
		else
			num_vf_qs = res.max_rss_qs / num_vfs;

		/* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
		 * interfaces per port. Provide RSS on VFs, only if number
		 * of VFs requested is less than MAX_RSS_IFACES limit.
		 */
		if (num_vfs >= MAX_RSS_IFACES)
			num_vf_qs = 1;
	}
	return num_vf_qs;
}
3746
/* Full teardown of the adapter's software/FW resources: stop the worker,
 * clear VFs, hand the SR-IOV queue pool back to FW for redistribution,
 * disable VXLAN offloads, delete MACs, destroy the interface and all
 * queues, and finally disable MSI-X.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 num_vf_qs;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter,
					       pci_sriov_get_totalvfs(pdev));
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					num_vf_qs);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3783
Kalesh AP0700d812015-01-20 03:51:43 -05003784static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3785 u32 cap_flags, u32 vf)
3786{
3787 u32 en_flags;
Kalesh AP0700d812015-01-20 03:51:43 -05003788
3789 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3790 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003791 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
Kalesh AP0700d812015-01-20 03:51:43 -05003792
3793 en_flags &= cap_flags;
3794
Vasundhara Volam435452a2015-03-20 06:28:23 -04003795 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
Kalesh AP0700d812015-01-20 03:51:43 -05003796}
3797
/* Create a FW interface for each VF. On non-BE3 chips the capability
 * flags come from the per-VF FW profile when one exists; VLAN-promiscuous
 * capability is always masked off so a VF cannot enable it.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3831
Sathya Perla39f1d942012-05-08 19:41:24 +00003832static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003833{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003834 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003835 int vf;
3836
Sathya Perla39f1d942012-05-08 19:41:24 +00003837 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3838 GFP_KERNEL);
3839 if (!adapter->vf_cfg)
3840 return -ENOMEM;
3841
Sathya Perla11ac75e2011-12-13 00:58:50 +00003842 for_all_vfs(adapter, vf_cfg, vf) {
3843 vf_cfg->if_handle = -1;
3844 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003845 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003846 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003847}
3848
/* Set up SR-IOV VFs. When VFs pre-exist (e.g. left enabled by a previous
 * driver instance), their if-handles and MACs are re-discovered; otherwise
 * interfaces and MACs are created fresh. Each VF is then granted filter
 * management privilege, given QoS/link/spoof-check configuration, and
 * SR-IOV is finally enabled on the PCI device.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already enabled: query existing handles and MACs
		 * instead of creating them
		 */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the FW's current spoof-check setting for this VF */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3932
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303933/* Converting function_mode bits on BE3 to SH mc_type enums */
3934
3935static u8 be_convert_mc_type(u32 function_mode)
3936{
Suresh Reddy66064db2014-06-23 16:41:29 +05303937 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303938 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303939 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303940 return FLEX10;
3941 else if (function_mode & VNIC_MODE)
3942 return vNIC2;
3943 else if (function_mode & UMC_ENABLED)
3944 return UMC;
3945 else
3946 return MC_NONE;
3947}
3948
/* On BE2/BE3 FW does not suggest the supported limits, so derive them
 * here: max unicast/multicast MACs, VLANs, TX/RSS/RX/event queue counts
 * and interface capability flags, based on chip type, multi-channel mode
 * and function capabilities.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res,
					  RESOURCE_LIMITS, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues only on an RSS-capable, non-SRIOV physical function */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
			BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* one extra RX queue beyond the RSS set (the default queue) */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
			BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
4019
Sathya Perla30128032011-11-10 19:17:57 +00004020static void be_setup_init(struct be_adapter *adapter)
4021{
4022 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004023 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004024 adapter->if_handle = -1;
4025 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004026 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004027 if (be_physfn(adapter))
4028 adapter->cmd_privileges = MAX_PRIVILEGES;
4029 else
4030 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004031}
4032
/* Query the SR-IOV resource pool limits from FW and stash them in
 * adapter->pool_res.  Also detects VFs left enabled by a previous driver
 * unload and records their count in adapter->num_vfs.
 * Always returns 0 (failures of the profile query are tolerated; the
 * BE3 fallback below covers FW that does not report max_vfs).
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	return 0;
}
4064
/* Read the SR-IOV pool configuration and, on Skyhawk, re-distribute the
 * PF-pool resources so the PF gets them while no VFs are enabled.
 * Called at setup time on SR-IOV capable PFs only.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	u16 num_vf_qs;
	int status;

	be_get_sriov_config(adapter);

	/* Advertise the HW limit to the PCI core only on a fresh load */
	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		num_vf_qs = be_calculate_vf_qs(adapter, 0);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 num_vf_qs);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
4090
/* Discover per-function resource limits (queues, MACs, vlans, EQs) and
 * record them in adapter->res.  BEx chips use driver-side tables; newer
 * chips query the FW.  Returns 0 or a FW-command error status.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
4141
/* Gather the adapter's FW/HW configuration: controller attributes, FW
 * config, log level (BEx), WOL capability, port name, active profile and
 * the per-function resource limits.  Also allocates the pmac_id table
 * sized to the uc-mac limit.  Returns 0 or a negative/FW error status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* On BEx, derive the driver's message level from the FW log level */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	/* Only the PF can query (and report) the active FW profile */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* One pmac_id slot per programmable unicast MAC */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
4183
/* Establish the netdev's MAC address: read the permanent MAC from FW on
 * first setup, or re-program the existing dev_addr after a HW reset.
 * Returns 0 or the FW-command error from the permanent-MAC query.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		/* First-time setup: use the factory (permanent) MAC */
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
4207
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304208static void be_schedule_worker(struct be_adapter *adapter)
4209{
4210 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4211 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4212}
4213
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004214static void be_schedule_err_detection(struct be_adapter *adapter)
4215{
4216 schedule_delayed_work(&adapter->be_err_detection_work,
4217 msecs_to_jiffies(1000));
4218 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4219}
4220
Sathya Perla77071332013-08-27 16:57:34 +05304221static int be_setup_queues(struct be_adapter *adapter)
4222{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304223 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05304224 int status;
4225
4226 status = be_evt_queues_create(adapter);
4227 if (status)
4228 goto err;
4229
4230 status = be_tx_qs_create(adapter);
4231 if (status)
4232 goto err;
4233
4234 status = be_rx_cqs_create(adapter);
4235 if (status)
4236 goto err;
4237
4238 status = be_mcc_queues_create(adapter);
4239 if (status)
4240 goto err;
4241
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304242 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4243 if (status)
4244 goto err;
4245
4246 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4247 if (status)
4248 goto err;
4249
Sathya Perla77071332013-08-27 16:57:34 +05304250 return 0;
4251err:
4252 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4253 return status;
4254}
4255
/* Re-create all data-path queues after a resource re-size (e.g. channel
 * count change).  Closes the netdev around the operation if it was
 * running, re-programs MSI-X when possible, and re-opens afterwards.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4291
/* Parse the major number out of a FW version string of the form
 * "major.minor...".  Returns 0 if the string does not begin with an
 * integer.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4302
Sathya Perlaf962f842015-02-23 04:20:16 -05004303/* If any VFs are already enabled don't FLR the PF */
4304static bool be_reset_required(struct be_adapter *adapter)
4305{
4306 return pci_num_vf(adapter->pdev) ? false : true;
4307}
4308
/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* Perform a function-level reset unless VFs are already enabled */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_error(adapter, BE_CLEAR_ALL);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4340
/* Full adapter bring-up: FW init, resource discovery, interface and
 * queue creation, MAC/vlan/flow-control programming and (optionally)
 * VF setup.  On failure everything set up so far is torn down via
 * be_clear().  Returns 0 or an error status.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* SR-IOV pool setup applies to PFs on BE3 and later chips only */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Warn about old BE2 FW known to have IRQ problems */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program vlans that were configured before a re-setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* If setting flow control fails, fetch what the FW actually uses */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4425
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: re-arm every event queue and schedule its NAPI context
 * so RX/TX completions are processed with interrupts unavailable.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
4439
/* Cookie that marks the start of the flash-section directory in a UFI image */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004441
Sathya Perla306f1342011-08-02 19:57:45 +00004442static bool phy_flashing_required(struct be_adapter *adapter)
4443{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05004444 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004445 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00004446}
4447
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004448static bool is_comp_in_ufi(struct be_adapter *adapter,
4449 struct flash_section_info *fsec, int type)
4450{
4451 int i = 0, img_type = 0;
4452 struct flash_section_info_g2 *fsec_g2 = NULL;
4453
Sathya Perlaca34fe32012-11-06 17:48:56 +00004454 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004455 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4456
4457 for (i = 0; i < MAX_FLASH_COMP; i++) {
4458 if (fsec_g2)
4459 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4460 else
4461 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4462
4463 if (img_type == type)
4464 return true;
4465 }
4466 return false;
4467
4468}
4469
Jingoo Han4188e7d2013-08-05 18:02:02 +09004470static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304471 int header_size,
4472 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004473{
4474 struct flash_section_info *fsec = NULL;
4475 const u8 *p = fw->data;
4476
4477 p += header_size;
4478 while (p < (fw->data + fw->size)) {
4479 fsec = (struct flash_section_info *)p;
4480 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4481 return fsec;
4482 p += 32;
4483 }
4484 return NULL;
4485}
4486
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304487static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4488 u32 img_offset, u32 img_size, int hdr_size,
4489 u16 img_optype, bool *crc_match)
4490{
4491 u32 crc_offset;
4492 int status;
4493 u8 crc[4];
4494
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004495 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4496 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304497 if (status)
4498 return status;
4499
4500 crc_offset = hdr_size + img_offset + img_size - 4;
4501
4502 /* Skip flashing, if crc of flashed region matches */
4503 if (!memcmp(crc, p + crc_offset, 4))
4504 *crc_match = true;
4505 else
4506 *crc_match = false;
4507
4508 return status;
4509}
4510
/* Write one image section to flash in 32KB chunks via the FW flashrom
 * command.  Intermediate chunks use the SAVE opcode; the final chunk
 * uses the FLASH opcode to commit.  PHY FW uses its own opcode pair,
 * and an ILLEGAL_REQUEST for PHY FW is tolerated (no PHY present).
 * Returns 0 on success or the FW-command error status.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		/* At most 32KB per flashrom command */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks buffer (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
4551
/* For BE2, BE3 and BE3-R */
/* Flash every applicable component of a UFI image using the fixed
 * per-generation layout tables below.  Components are skipped when
 * absent from the section directory, when FW prerequisites are not
 * met (NCSI, PHY), or when the redboot CRC already matches.
 * Returns 0 on success, -1 on a malformed image, or a FW error status.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	/* Flash layout table for gen3 (BE3/BE3-R) chips:
	 * { flash offset, optype, max size, image type }
	 */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Flash layout table for gen2 (BE2) chips */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		/* gen2 images carry no per-image headers */
		img_hdrs_size = 0;
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW needs a new-enough base FW on the card */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Skip re-flashing redboot when its CRC already matches */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
4669
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304670static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4671{
4672 u32 img_type = le32_to_cpu(fsec_entry.type);
4673 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4674
4675 if (img_optype != 0xFFFF)
4676 return img_optype;
4677
4678 switch (img_type) {
4679 case IMAGE_FIRMWARE_iSCSI:
4680 img_optype = OPTYPE_ISCSI_ACTIVE;
4681 break;
4682 case IMAGE_BOOT_CODE:
4683 img_optype = OPTYPE_REDBOOT;
4684 break;
4685 case IMAGE_OPTION_ROM_ISCSI:
4686 img_optype = OPTYPE_BIOS;
4687 break;
4688 case IMAGE_OPTION_ROM_PXE:
4689 img_optype = OPTYPE_PXE_BIOS;
4690 break;
4691 case IMAGE_OPTION_ROM_FCoE:
4692 img_optype = OPTYPE_FCOE_BIOS;
4693 break;
4694 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4695 img_optype = OPTYPE_ISCSI_BACKUP;
4696 break;
4697 case IMAGE_NCSI:
4698 img_optype = OPTYPE_NCSI_FW;
4699 break;
4700 case IMAGE_FLASHISM_JUMPVECTOR:
4701 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4702 break;
4703 case IMAGE_FIRMWARE_PHY:
4704 img_optype = OPTYPE_SH_PHY_FW;
4705 break;
4706 case IMAGE_REDBOOT_DIR:
4707 img_optype = OPTYPE_REDBOOT_DIR;
4708 break;
4709 case IMAGE_REDBOOT_CONFIG:
4710 img_optype = OPTYPE_REDBOOT_CONFIG;
4711 break;
4712 case IMAGE_UFI_DIR:
4713 img_optype = OPTYPE_UFI_DIR;
4714 break;
4715 default:
4716 break;
4717 }
4718
4719 return img_optype;
4720}
4721
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004722static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304723 const struct firmware *fw,
4724 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004725{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004726 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004727 bool crc_match, old_fw_img, flash_offset_support = true;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304728 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004729 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304730 u32 img_offset, img_size, img_type;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004731 u16 img_optype, flash_optype;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304732 int status, i, filehdr_size;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304733 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004734
4735 filehdr_size = sizeof(struct flash_file_hdr_g3);
4736 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4737 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304738 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304739 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004740 }
4741
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004742retry_flash:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004743 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4744 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4745 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304746 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4747 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4748 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004749
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304750 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004751 continue;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004752
4753 if (flash_offset_support)
4754 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4755 else
4756 flash_optype = img_optype;
4757
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304758 /* Don't bother verifying CRC if an old FW image is being
4759 * flashed
4760 */
4761 if (old_fw_img)
4762 goto flash;
4763
4764 status = be_check_flash_crc(adapter, fw->data, img_offset,
4765 img_size, filehdr_size +
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004766 img_hdrs_size, flash_optype,
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304767 &crc_match);
Kalesh AP4c600052014-05-30 19:06:26 +05304768 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4769 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004770 /* The current FW image on the card does not support
4771 * OFFSET based flashing. Retry using older mechanism
4772 * of OPTYPE based flashing
4773 */
4774 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4775 flash_offset_support = false;
4776 goto retry_flash;
4777 }
4778
4779 /* The current FW image on the card does not recognize
4780 * the new FLASH op_type. The FW download is partially
4781 * complete. Reboot the server now to enable FW image
4782 * to recognize the new FLASH op_type. To complete the
4783 * remaining process, download the same FW again after
4784 * the reboot.
4785 */
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304786 dev_err(dev, "Flash incomplete. Reset the server\n");
4787 dev_err(dev, "Download FW image again after reset\n");
4788 return -EAGAIN;
4789 } else if (status) {
4790 dev_err(dev, "Could not get CRC for 0x%x region\n",
4791 img_optype);
4792 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004793 }
4794
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304795 if (crc_match)
4796 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004797
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304798flash:
4799 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004800 if (p + img_size > fw->data + fw->size)
4801 return -1;
4802
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004803 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4804 img_offset);
4805
4806 /* The current FW image on the card does not support OFFSET
4807 * based flashing. Retry using older mechanism of OPTYPE based
4808 * flashing
4809 */
4810 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4811 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4812 flash_offset_support = false;
4813 goto retry_flash;
4814 }
4815
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304816 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4817 * UFI_DIR region
4818 */
Kalesh AP4c600052014-05-30 19:06:26 +05304819 if (old_fw_img &&
4820 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4821 (img_optype == OPTYPE_UFI_DIR &&
4822 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304823 continue;
4824 } else if (status) {
4825 dev_err(dev, "Flashing section type 0x%x failed\n",
4826 img_type);
4827 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004828 }
4829 }
4830 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004831}
4832
/* Download a firmware image to a Lancer adapter.
 *
 * The image is streamed to the adapter in 32KB chunks via WRITE_OBJECT
 * commands into the "/prg" flash object, then committed with a final
 * zero-length write. Depending on @change_status reported by the adapter,
 * the new FW is activated by an in-band reset or requires a server reboot.
 *
 * Returns 0 on success (even if activation still needs a reboot), -EINVAL
 * for a misaligned image, -ENOMEM on DMA buffer allocation failure, or the
 * errno derived from the failing adapter command.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* Chunks are copied/written in u32 units; reject odd-sized files */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One DMA buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
					   &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the adapter actually accepted, which may
		 * be less than chunk_size
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		/* Adapter can activate the new FW via an in-band reset */
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4917
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004918/* Check if the flash image file is compatible with the adapter that
4919 * is being flashed.
4920 */
4921static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4922 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004923{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004924 if (!fhdr) {
4925 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
Vasundhara Volam887a65c2015-07-10 05:32:46 -04004926 return false;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004927 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004928
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004929 /* First letter of the build version is used to identify
4930 * which chip this image file is meant for.
4931 */
4932 switch (fhdr->build[0]) {
4933 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004934 if (!skyhawk_chip(adapter))
4935 return false;
4936 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004937 case BLD_STR_UFI_TYPE_BE3:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004938 if (!BE3_chip(adapter))
4939 return false;
4940 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004941 case BLD_STR_UFI_TYPE_BE2:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004942 if (!BE2_chip(adapter))
4943 return false;
4944 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004945 default:
4946 return false;
4947 }
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004948
4949 return (fhdr->asic_type_rev >= adapter->asic_rev);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004950}
4951
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004952static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4953{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004954 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004955 struct flash_file_hdr_g3 *fhdr3;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004956 struct image_hdr *img_hdr_ptr;
4957 int status = 0, i, num_imgs;
Ajit Khaparde84517482009-09-04 03:12:16 +00004958 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004959
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004960 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4961 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4962 dev_err(dev, "Flash image is not compatible with adapter\n");
4963 return -EINVAL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004964 }
4965
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004966 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304967 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4968 GFP_KERNEL);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004969 if (!flash_cmd.va)
4970 return -ENOMEM;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004971
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004972 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4973 for (i = 0; i < num_imgs; i++) {
4974 img_hdr_ptr = (struct image_hdr *)(fw->data +
4975 (sizeof(struct flash_file_hdr_g3) +
4976 i * sizeof(struct image_hdr)));
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004977 if (!BE2_chip(adapter) &&
4978 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4979 continue;
4980
4981 if (skyhawk_chip(adapter))
4982 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4983 num_imgs);
4984 else
4985 status = be_flash_BEx(adapter, fw, &flash_cmd,
4986 num_imgs);
Ajit Khaparde84517482009-09-04 03:12:16 +00004987 }
4988
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004989 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4990 if (!status)
4991 dev_info(dev, "Firmware flashed successfully\n");
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004992
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004993 return status;
4994}
4995
4996int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4997{
4998 const struct firmware *fw;
4999 int status;
5000
5001 if (!netif_running(adapter->netdev)) {
5002 dev_err(&adapter->pdev->dev,
5003 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05305004 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00005005 }
5006
5007 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
5008 if (status)
5009 goto fw_exit;
5010
5011 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
5012
5013 if (lancer_chip(adapter))
5014 status = lancer_fw_download(adapter, fw);
5015 else
5016 status = be_fw_download(adapter, fw);
5017
Somnath Kotureeb65ce2013-05-26 21:08:36 +00005018 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05305019 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00005020
Ajit Khaparde84517482009-09-04 03:12:16 +00005021fw_exit:
5022 release_firmware(fw);
5023 return status;
5024}
5025
/* ndo_bridge_setlink: configure the embedded switch mode (VEB/VEPA).
 *
 * Parses the IFLA_AF_SPEC nest for an IFLA_BRIDGE_MODE attribute and
 * programs the requested port-forwarding type via be_cmd_set_hsw_config().
 * Only supported when SR-IOV is enabled.
 *
 * Returns 0 on success, -EOPNOTSUPP without SR-IOV, -EINVAL for malformed
 * or unsupported attributes, or the adapter command's error code.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* Only the first IFLA_BRIDGE_MODE attribute is acted upon */
		return status;
	}
	/* NOTE(review): if no IFLA_BRIDGE_MODE attribute is present, the loop
	 * falls through to the error print below with mode==0 ("VEB") but
	 * still returns 0 — presumably intentional; confirm before changing.
	 */
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
5072
5073static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02005074 struct net_device *dev, u32 filter_mask,
5075 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005076{
5077 struct be_adapter *adapter = netdev_priv(dev);
5078 int status = 0;
5079 u8 hsw_mode;
5080
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005081 /* BE and Lancer chips support VEB mode only */
5082 if (BEx_chip(adapter) || lancer_chip(adapter)) {
5083 hsw_mode = PORT_FWD_TYPE_VEB;
5084 } else {
5085 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04005086 adapter->if_handle, &hsw_mode,
5087 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005088 if (status)
5089 return 0;
Kalesh Purayilff9ed192015-07-10 05:32:44 -04005090
5091 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
5092 return 0;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005093 }
5094
5095 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
5096 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01005097 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Scott Feldman7d4f8d82015-06-22 00:27:17 -07005098 0, 0, nlflags, filter_mask, NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005099}
5100
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305101#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005102/* VxLAN offload Notes:
5103 *
5104 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5105 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5106 * is expected to work across all types of IP tunnels once exported. Skyhawk
5107 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305108 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5109 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5110 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005111 *
5112 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
5113 * adds more than one port, disable offloads and don't re-enable them again
5114 * until after all the tunnels are removed.
5115 */
/* ndo_add_vxlan_port callback: enable VxLAN offloads for a UDP port.
 *
 * Skyhawk supports offloads for exactly one VxLAN UDP dport (see the
 * "VxLAN offload Notes" comment above). The first port added converts the
 * interface to tunnel mode and turns on the tunnel offload feature bits;
 * any additional port disables offloads entirely until all ports are
 * removed again (tracked via vxlan_port_count).
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Only Skyhawk supports VxLAN offloads */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		/* A second port while offloads are active: count it and
		 * disable offloads until the tunnels go away.
		 */
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Offloads already torn down earlier; just keep counting ports */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Advertise tunnel offload capabilities to the stack */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
5164
5165static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5166 __be16 port)
5167{
5168 struct be_adapter *adapter = netdev_priv(netdev);
5169
5170 if (lancer_chip(adapter) || BEx_chip(adapter))
5171 return;
5172
5173 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005174 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305175
5176 be_disable_vxlan_offloads(adapter);
5177
5178 dev_info(&adapter->pdev->dev,
5179 "Disabled VxLAN offloads for UDP port %d\n",
5180 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005181done:
5182 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05305183}
Joe Stringer725d5482014-11-13 16:38:13 -08005184
Jesse Gross5f352272014-12-23 22:37:26 -08005185static netdev_features_t be_features_check(struct sk_buff *skb,
5186 struct net_device *dev,
5187 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08005188{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305189 struct be_adapter *adapter = netdev_priv(dev);
5190 u8 l4_hdr = 0;
5191
5192 /* The code below restricts offload features for some tunneled packets.
5193 * Offload features for normal (non tunnel) packets are unchanged.
5194 */
5195 if (!skb->encapsulation ||
5196 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5197 return features;
5198
5199 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5200 * should disable tunnel offload features if it's not a VxLAN packet,
5201 * as tunnel offloads have been enabled only for VxLAN. This is done to
5202 * allow other tunneled traffic like GRE work fine while VxLAN
5203 * offloads are configured in Skyhawk-R.
5204 */
5205 switch (vlan_get_protocol(skb)) {
5206 case htons(ETH_P_IP):
5207 l4_hdr = ip_hdr(skb)->protocol;
5208 break;
5209 case htons(ETH_P_IPV6):
5210 l4_hdr = ipv6_hdr(skb)->nexthdr;
5211 break;
5212 default:
5213 return features;
5214 }
5215
5216 if (l4_hdr != IPPROTO_UDP ||
5217 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5218 skb->inner_protocol != htons(ETH_P_TEB) ||
5219 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5220 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
5221 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
5222
5223 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08005224}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305225#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05305226
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05305227static int be_get_phys_port_id(struct net_device *dev,
5228 struct netdev_phys_item_id *ppid)
5229{
5230 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
5231 struct be_adapter *adapter = netdev_priv(dev);
5232 u8 *id;
5233
5234 if (MAX_PHYS_ITEM_ID_LEN < id_len)
5235 return -ENOSPC;
5236
5237 ppid->id[0] = adapter->hba_port_num + 1;
5238 id = &ppid->id[1];
5239 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
5240 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
5241 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
5242
5243 ppid->id_len = id_len;
5244
5245 return 0;
5246}
5247
/* net_device_ops table for the be2net driver. Entries guarded by #ifdef
 * are only present when the corresponding kernel feature is configured.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
	.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
#endif
	.ndo_get_phys_port_id = be_get_phys_port_id,
};
5280
/* One-time net_device setup: advertise offload features, set flags and
 * wire up the netdev/ethtool ops tables.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-togglable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently-active features: all hw_features plus VLAN RX offloads
	 * that are always on
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5307
Kalesh AP87ac1a52015-02-23 04:20:15 -05005308static void be_cleanup(struct be_adapter *adapter)
5309{
5310 struct net_device *netdev = adapter->netdev;
5311
5312 rtnl_lock();
5313 netif_device_detach(netdev);
5314 if (netif_running(netdev))
5315 be_close(netdev);
5316 rtnl_unlock();
5317
5318 be_clear(adapter);
5319}
5320
Kalesh AP484d76f2015-02-23 04:20:14 -05005321static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005322{
Kalesh APd0e1b312015-02-23 04:20:12 -05005323 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005324 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005325
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005326 status = be_setup(adapter);
5327 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005328 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005329
Kalesh APd0e1b312015-02-23 04:20:12 -05005330 if (netif_running(netdev)) {
5331 status = be_open(netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005332 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005333 return status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005334 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005335
Kalesh APd0e1b312015-02-23 04:20:12 -05005336 netif_device_attach(netdev);
5337
Kalesh AP484d76f2015-02-23 04:20:14 -05005338 return 0;
5339}
5340
5341static int be_err_recover(struct be_adapter *adapter)
5342{
5343 struct device *dev = &adapter->pdev->dev;
5344 int status;
5345
5346 status = be_resume(adapter);
5347 if (status)
5348 goto err;
5349
Sathya Perla9fa465c2015-02-23 04:20:13 -05005350 dev_info(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005351 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005352err:
Sathya Perla9fa465c2015-02-23 04:20:13 -05005353 if (be_physfn(adapter))
Somnath Kotur4bebb562013-12-05 12:07:55 +05305354 dev_err(dev, "Adapter recovery failed\n");
Sathya Perla9fa465c2015-02-23 04:20:13 -05005355 else
5356 dev_err(dev, "Re-trying adapter recovery\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005357
5358 return status;
5359}
5360
/* Periodic error-detection work item.
 *
 * Polls the adapter for HW errors; on error, quiesces the device and (on
 * Lancer, which is the only chip with recovery support here) attempts
 * recovery. Re-arms itself unless recovery failed on a PF.
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,
			     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (be_check_error(adapter, BE_ERROR_HW)) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}
5382
Vasundhara Volam21252372015-02-06 08:18:42 -05005383static void be_log_sfp_info(struct be_adapter *adapter)
5384{
5385 int status;
5386
5387 status = be_cmd_query_sfp_info(adapter);
5388 if (!status) {
5389 dev_err(&adapter->pdev->dev,
5390 "Unqualified SFP+ detected on %c from %s part no: %s",
5391 adapter->port_name, adapter->phy.vendor_name,
5392 adapter->phy.vendor_pn);
5393 }
5394 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5395}
5396
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005397static void be_worker(struct work_struct *work)
5398{
5399 struct be_adapter *adapter =
5400 container_of(work, struct be_adapter, work.work);
5401 struct be_rx_obj *rxo;
5402 int i;
5403
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005404 /* when interrupts are not yet enabled, just reap any pending
Sathya Perla78fad34e2015-02-23 04:20:08 -05005405 * mcc completions
5406 */
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005407 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00005408 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005409 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00005410 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005411 goto reschedule;
5412 }
5413
5414 if (!adapter->stats_cmd_sent) {
5415 if (lancer_chip(adapter))
5416 lancer_cmd_get_pport_stats(adapter,
Kalesh APcd3307aa2014-09-19 15:47:02 +05305417 &adapter->stats_cmd);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005418 else
5419 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5420 }
5421
Vasundhara Volamd696b5e2013-08-06 09:27:16 +05305422 if (be_physfn(adapter) &&
5423 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00005424 be_cmd_get_die_temperature(adapter);
5425
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005426 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6384a4d2013-10-25 10:40:16 +05305427 /* Replenish RX-queues starved due to memory
5428 * allocation failures.
5429 */
5430 if (rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05305431 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005432 }
5433
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04005434 /* EQ-delay update for Skyhawk is done while notifying EQ */
5435 if (!skyhawk_chip(adapter))
5436 be_eqd_update(adapter, false);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005437
Vasundhara Volam21252372015-02-06 08:18:42 -05005438 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5439 be_log_sfp_info(adapter);
5440
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005441reschedule:
5442 adapter->work_counter++;
5443 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5444}
5445
Sathya Perla78fad34e2015-02-23 04:20:08 -05005446static void be_unmap_pci_bars(struct be_adapter *adapter)
5447{
5448 if (adapter->csr)
5449 pci_iounmap(adapter->pdev, adapter->csr);
5450 if (adapter->db)
5451 pci_iounmap(adapter->pdev, adapter->db);
5452}
5453
/* Return the PCI BAR number that exposes the doorbell registers:
 * BAR 0 on Lancer chips and on all virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5461
5462static int be_roce_map_pci_bars(struct be_adapter *adapter)
5463{
5464 if (skyhawk_chip(adapter)) {
5465 adapter->roce_db.size = 4096;
5466 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5467 db_bar(adapter));
5468 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5469 db_bar(adapter));
5470 }
5471 return 0;
5472}
5473
/* Map the PCI BARs the driver needs (CSR, doorbell, PCICFG) and cache
 * chip-identity bits read from the SLI_INTF config register.
 * Returns 0 on success or -ENOMEM; on failure, any BARs already mapped
 * are unmapped before returning.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* SLI_INTF tells us the SLI family and whether we run as a VF */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR BAR (2) is mapped only on BE2/BE3 physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			/* VFs reach PCICFG via an offset into the db BAR */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5516
5517static void be_drv_cleanup(struct be_adapter *adapter)
5518{
5519 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5520 struct device *dev = &adapter->pdev->dev;
5521
5522 if (mem->va)
5523 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5524
5525 mem = &adapter->rx_filter;
5526 if (mem->va)
5527 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5528
5529 mem = &adapter->stats_cmd;
5530 if (mem->va)
5531 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5532}
5533
/* Allocate and initialize various fields in be_adapter struct:
 * the mailbox, rx-filter and stats DMA buffers, the command locks and
 * the deferred work items.  Returns 0 or -ENOMEM; on failure everything
 * allocated so far is freed via the goto-cleanup chain at the bottom.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is an aligned view into the allocation above; only
	 * mbox_mem_alloced is ever passed to dma_free_coherent().
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	/* DMA buffer reused by the runtime RX_FILTER commands */
	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request size depends on chip family / command version */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	/* Flow-control defaults; may be changed later via ethtool */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5604
/* PCI remove callback: tear things down in roughly the reverse order of
 * be_probe().  Safe against a probe that failed before drvdata was set.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the recovery worker before dismantling what it may touch */
	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5634
Arnd Bergmann9a032592015-05-18 23:06:45 +02005635static ssize_t be_hwmon_show_temp(struct device *dev,
5636 struct device_attribute *dev_attr,
5637 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305638{
5639 struct be_adapter *adapter = dev_get_drvdata(dev);
5640
5641 /* Unit: millidegree Celsius */
5642 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5643 return -EIO;
5644 else
5645 return sprintf(buf, "%u\n",
5646 adapter->hwmon_info.be_on_die_temp * 1000);
5647}
5648
/* hwmon sysfs plumbing: a single read-only temp1_input attribute, grouped
 * for devm_hwmon_device_register_with_groups() in be_probe().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
5658
Sathya Perlad3791422012-09-28 04:39:44 +00005659static char *mc_name(struct be_adapter *adapter)
5660{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305661 char *str = ""; /* default */
5662
5663 switch (adapter->mc_type) {
5664 case UMC:
5665 str = "UMC";
5666 break;
5667 case FLEX10:
5668 str = "FLEX10";
5669 break;
5670 case vNIC1:
5671 str = "vNIC-1";
5672 break;
5673 case nPAR:
5674 str = "nPAR";
5675 break;
5676 case UFP:
5677 str = "UFP";
5678 break;
5679 case vNIC2:
5680 str = "vNIC-2";
5681 break;
5682 default:
5683 str = "";
5684 }
5685
5686 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005687}
5688
/* "PF" or "VF" depending on the kind of PCI function we are driving */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5693
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005694static inline char *nic_name(struct pci_dev *pdev)
5695{
5696 switch (pdev->device) {
5697 case OC_DEVICE_ID1:
5698 return OC_NAME;
5699 case OC_DEVICE_ID2:
5700 return OC_NAME_BE;
5701 case OC_DEVICE_ID3:
5702 case OC_DEVICE_ID4:
5703 return OC_NAME_LANCER;
5704 case BE_DEVICE_ID2:
5705 return BE3_NAME;
5706 case OC_DEVICE_ID5:
5707 case OC_DEVICE_ID6:
5708 return OC_NAME_SH;
5709 default:
5710 return BE_NAME;
5711 }
5712}
5713
/* PCI probe callback: enable the device, map BARs, allocate driver state,
 * bring up the HW (be_setup) and register the netdev.  Each failure jumps
 * to the matching unwind label at the bottom.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* adapter lives in the netdev's private area */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is optional; failure to enable it is not fatal */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter);

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5809
/* PM suspend callback: quiesce the adapter and power the device down */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Arm wake-on-lan in FW before powering down */
	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5827
/* PM resume callback: re-enable the PCI device and rebuild HW state */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	/* Restart the error-detection worker stopped by be_suspend() */
	be_schedule_err_detection(adapter);

	/* Wake-on-lan is no longer needed once we are running */
	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5850
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	/* Stop both workers before the function reset below */
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	/* Function-level reset makes the HW stop all DMA */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5871
/* EEH/AER callback: a PCI channel error was detected.  Quiesce the
 * adapter once and tell the PCI core whether a slot reset should be
 * attempted or the device abandoned.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Quiesce only on the first report; further callbacks are no-ops */
	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5903
/* EEH/AER slot-reset callback: the slot was reset; re-enable the device
 * and wait for FW readiness before declaring the adapter recovered.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* Clear the sticky error state set in be_eeh_err_detected() */
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
5929
/* EEH/AER resume callback: traffic may flow again; re-init the adapter */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	/* Restart the periodic error-detection worker */
	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5948
/* sysfs-driven SR-IOV configure hook (pci_driver.sriov_configure).
 * num_vfs == 0 disables VFs; non-zero enables that many.  Returns the
 * number of VFs enabled, 0, or a negative errno.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	/* NOTE(review): VFs are cleared before the assigned-VF check below;
	 * presumably be_vf_clear() copes with assigned VFs itself — verify.
	 */
	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	/* On vf_setup failure, 0 is reported (no VFs enabled) */
	if (!status)
		return adapter->num_vfs;

	return 0;
}
6002
/* PCI error recovery (EEH/AER) hooks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
6008
/* PCI driver registration table: probe/remove, legacy PM, shutdown,
 * SR-IOV configure and error-recovery entry points.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
6020
6021static int __init be_init_module(void)
6022{
Joe Perches8e95a202009-12-03 07:58:21 +00006023 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6024 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006025 printk(KERN_WARNING DRV_NAME
6026 " : Module param rx_frag_size must be 2048/4096/8192."
6027 " Using 2048\n");
6028 rx_frag_size = 2048;
6029 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006030
Vasundhara Volamace40af2015-03-04 00:44:34 -05006031 if (num_vfs > 0) {
6032 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6033 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6034 }
6035
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006036 return pci_register_driver(&be_driver);
6037}
6038module_init(be_init_module);
6039
/* Module exit point: unregister the driver (triggers be_remove()) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);